Dataset schema (one row per extracted class; `stringlengths` columns give min/max string length, numeric columns give min/max value; ⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | stringlengths | 7 | 58 |
| file_path | stringlengths | 9 | 302 |
| class_name | stringlengths | 5 | 256 |
| human_written_code | stringlengths | 16 | 2.16M |
| class_skeleton | stringlengths (⌀) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
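A quick way to sanity-check this schema is to iterate over rows and filter on the metric columns. A minimal sketch, assuming the dump comes from a Hugging Face `datasets` repo; the identifier `org/class-level-code-metrics` is a hypothetical placeholder, not the real dataset name:

```python
from datasets import load_dataset

# Hypothetical dataset id; substitute the actual repo this dump was taken from.
ds = load_dataset("org/class-level-code-metrics", split="train")

# Every column in the table above is a flat feature, so plain dict access works:
# keep documented, low-complexity classes.
selected = ds.filter(
    lambda row: row["total_doc_str"] > 0
    and row["CommentToCodeRatio"] >= 0.2
    and row["MaxCyclomatic"] <= 10
)
print(len(selected), selected[0]["class_name"])
```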
id: 3,900
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py
class_name: transformers.models.mobilenet_v2.image_processing_mobilenet_v2.MobileNetV2ImageProcessor

human_written_code:
````python
import numpy as np
from ...image_transforms import get_resize_output_image_size, resize, to_channel_dimension_format
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...utils import TensorType, filter_out_non_signature_kwargs, is_torch_available, is_torch_tensor, logging
from typing import Optional, Union
from ...utils.import_utils import requires
if is_torch_available():
    import torch
logger = logging.get_logger(__name__)
@requires(backends=('vision',))
class MobileNetV2ImageProcessor(BaseImageProcessor):
"""
Constructs a MobileNetV2 image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
`preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
Can be overridden by the `crop_size` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
`preprocess` method.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_reduce_labels: bool=False, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'shortest_edge': 256}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
crop_size = get_size_dict(crop_size, param_name='crop_size')
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_reduce_labels = do_reduce_labels
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
default_to_square = True
if 'shortest_edge' in size:
size = size['shortest_edge']
default_to_square = False
elif 'height' in size and 'width' in size:
size = (size['height'], size['width'])
else:
raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def reduce_label(self, label: ImageInput) -> np.ndarray:
label = to_numpy_array(label)
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
return label
def __call__(self, images, segmentation_maps=None, **kwargs):
"""
Preprocesses a batch of images and optionally segmentation maps.
Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
passed in as positional arguments.
"""
return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
def _preprocess(self, image: ImageInput, do_reduce_labels: bool, do_resize: bool, do_rescale: bool, do_center_crop: bool, do_normalize: bool, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, rescale_factor: Optional[float]=None, crop_size: Optional[dict[str, int]]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
if do_reduce_labels:
image = self.reduce_label(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
return image
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single image."""
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(image=image, do_reduce_labels=False, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_center_crop=do_center_crop, crop_size=crop_size, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_mask(self, segmentation_map: ImageInput, do_reduce_labels: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single mask."""
segmentation_map = to_numpy_array(segmentation_map)
if segmentation_map.ndim == 2:
added_channel_dim = True
segmentation_map = segmentation_map[None, ...]
input_data_format = ChannelDimension.FIRST
else:
added_channel_dim = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
segmentation_map = self._preprocess(image=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, size=size, resample=PILImageResampling.NEAREST, do_rescale=False, do_center_crop=do_center_crop, crop_size=crop_size, do_normalize=False, image_mean=None, image_std=None, input_data_format=input_data_format)
if added_channel_dim:
segmentation_map = segmentation_map.squeeze(0)
segmentation_map = segmentation_map.astype(np.int64)
return segmentation_map
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
segmentation_maps (`ImageInput`, *optional*):
Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values to the range [0, 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name='crop_size')
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
if segmentation_maps is not None and (not valid_images(segmentation_maps)):
raise ValueError('Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
images = [self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_center_crop=do_center_crop, crop_size=crop_size, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) for img in images]
data = {'pixel_values': images}
if segmentation_maps is not None:
segmentation_maps = [self._preprocess_mask(segmentation_map=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, size=size, do_center_crop=do_center_crop, crop_size=crop_size, input_data_format=input_data_format) for segmentation_map in segmentation_maps]
data['labels'] = segmentation_maps
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]]=None):
"""
Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`MobileNetV2ForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
"""
logits = outputs.logits
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
````

class_skeleton:
````python
@requires(backends=('vision',))
class MobileNetV2ImageProcessor(BaseImageProcessor):
'''
Constructs a MobileNetV2 image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 256}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
`preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
Can be overridden by the `crop_size` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
`preprocess` method.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_reduce_labels: bool=False, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def reduce_label(self, label: ImageInput) -> np.ndarray:
pass
def __call__(self, images, segmentation_maps=None, **kwargs):
'''
Preprocesses a batch of images and optionally segmentation maps.
Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
passed in as positional arguments.
'''
pass
def _preprocess(self, image: ImageInput, do_reduce_labels: bool, do_resize: bool, do_rescale: bool, do_center_crop: bool, do_normalize: bool, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, rescale_factor: Optional[float]=None, crop_size: Optional[dict[str, int]]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
pass
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single image.'''
pass
def _preprocess_mask(self, segmentation_map: ImageInput, do_reduce_labels: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single mask.'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
segmentation_maps (`ImageInput`, *optional*):
Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values to the range [0, 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]]=None):
'''
Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`MobileNetV2ForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
'''
pass
````

metrics: total_program_units=12, total_doc_str=7, AvgCountLine=63, AvgCountLineBlank=5, AvgCountLineCode=39, AvgCountLineComment=20, AvgCyclomatic=8, CommentToCodeRatio=0.75, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=2, CountClassDerived=1, CountDeclInstanceMethod=4, CountDeclInstanceVariable=10, CountDeclMethod=4, CountDeclMethodAll=24, CountLine=300, CountLineBlank=26, CountLineCode=157, CountLineCodeDecl=63, CountLineCodeExe=114, CountLineComment=117, CountStmt=78, CountStmtDecl=25, CountStmtExe=73, MaxCyclomatic=19, MaxInheritanceTree=3, MaxNesting=2, SumCyclomatic=32
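The `reduce_label` remapping in row 3,900 is easy to misread, so here is the same arithmetic as a standalone sketch (plain NumPy, no transformers dependency): background id 0 becomes the ignore index 255, every other id shifts down by one, and an input 255 stays 255.

```python
import numpy as np

def reduce_label(label: np.ndarray) -> np.ndarray:
    # Same three steps as MobileNetV2ImageProcessor.reduce_label above.
    label = label.copy()
    label[label == 0] = 255    # background 0 -> ignore index 255
    label = label - 1          # shift all class ids down by one
    label[label == 254] = 255  # 254 only arises from former 0/255 pixels; keep them at 255
    return label

print(reduce_label(np.array([0, 1, 2, 255])))  # [255, 0, 1, 255]
```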

id: 3,901
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py
class_name: transformers.models.mobilenet_v2.modeling_mobilenet_v2.MobileNetV2ConvLayer

human_written_code:
````python
from typing import Optional, Union
from .configuration_mobilenet_v2 import MobileNetV2Config
import torch
from torch import nn
from ...activations import ACT2FN
class MobileNetV2ConvLayer(nn.Module):
def __init__(self, config: MobileNetV2Config, in_channels: int, out_channels: int, kernel_size: int, stride: int=1, groups: int=1, bias: bool=False, dilation: int=1, use_normalization: bool=True, use_activation: Union[bool, str]=True, layer_norm_eps: Optional[float]=None) -> None:
super().__init__()
self.config = config
if in_channels % groups != 0:
raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.')
if out_channels % groups != 0:
raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.')
padding = 0 if config.tf_padding else int((kernel_size - 1) / 2) * dilation
self.convolution = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode='zeros')
if use_normalization:
self.normalization = nn.BatchNorm2d(num_features=out_channels, eps=config.layer_norm_eps if layer_norm_eps is None else layer_norm_eps, momentum=0.997, affine=True, track_running_stats=True)
else:
self.normalization = None
if use_activation:
if isinstance(use_activation, str):
self.activation = ACT2FN[use_activation]
elif isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
else:
self.activation = None
def forward(self, features: torch.Tensor) -> torch.Tensor:
if self.config.tf_padding:
features = apply_tf_padding(features, self.convolution)
features = self.convolution(features)
if self.normalization is not None:
features = self.normalization(features)
if self.activation is not None:
features = self.activation(features)
return features
````

class_skeleton:
````python
class MobileNetV2ConvLayer(nn.Module):
def __init__(self, config: MobileNetV2Config, in_channels: int, out_channels: int, kernel_size: int, stride: int=1, groups: int=1, bias: bool=False, dilation: int=1, use_normalization: bool=True, use_activation: Union[bool, str]=True, layer_norm_eps: Optional[float]=None) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
````

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=33, AvgCountLineBlank=3, AvgCountLineCode=30, AvgCountLineComment=0, AvgCyclomatic=7, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=67, CountLineBlank=6, CountLineCode=61, CountLineCodeDecl=21, CountLineCodeExe=45, CountLineComment=0, CountStmt=28, CountStmtDecl=8, CountStmtExe=25, MaxCyclomatic=9, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=13
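The padding expression in `MobileNetV2ConvLayer.__init__`, `int((kernel_size - 1) / 2) * dilation`, is ordinary "same" padding for odd kernels scaled by the dilation. A quick check of the arithmetic, assuming stride 1:

```python
# For stride 1, output size equals input size when 2 * padding == effective_kernel - 1,
# where the effective (dilated) kernel span is dilation * (kernel_size - 1) + 1.
for kernel_size in (1, 3, 5):
    for dilation in (1, 2):
        padding = int((kernel_size - 1) / 2) * dilation
        effective = dilation * (kernel_size - 1) + 1
        assert 2 * padding == effective - 1
        print(f"kernel={kernel_size} dilation={dilation} -> padding={padding}")
```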

id: 3,902
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py
class_name: transformers.models.mobilenet_v2.modeling_mobilenet_v2.MobileNetV2DeepLabV3Plus

human_written_code:
````python
from .configuration_mobilenet_v2 import MobileNetV2Config
import torch
from torch import nn
class MobileNetV2DeepLabV3Plus(nn.Module):
"""
The neural network from the paper "Encoder-Decoder with Atrous Separable Convolution for Semantic Image
Segmentation" https://huggingface.co/papers/1802.02611
"""
def __init__(self, config: MobileNetV2Config) -> None:
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(output_size=1)
self.conv_pool = MobileNetV2ConvLayer(config, in_channels=apply_depth_multiplier(config, 320), out_channels=256, kernel_size=1, stride=1, use_normalization=True, use_activation='relu', layer_norm_eps=1e-05)
self.conv_aspp = MobileNetV2ConvLayer(config, in_channels=apply_depth_multiplier(config, 320), out_channels=256, kernel_size=1, stride=1, use_normalization=True, use_activation='relu', layer_norm_eps=1e-05)
self.conv_projection = MobileNetV2ConvLayer(config, in_channels=512, out_channels=256, kernel_size=1, stride=1, use_normalization=True, use_activation='relu', layer_norm_eps=1e-05)
self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
self.classifier = MobileNetV2ConvLayer(config, in_channels=256, out_channels=config.num_labels, kernel_size=1, use_normalization=False, use_activation=False, bias=True)
def forward(self, features: torch.Tensor) -> torch.Tensor:
spatial_size = features.shape[-2:]
features_pool = self.avg_pool(features)
features_pool = self.conv_pool(features_pool)
features_pool = nn.functional.interpolate(features_pool, size=spatial_size, mode='bilinear', align_corners=True)
features_aspp = self.conv_aspp(features)
features = torch.cat([features_pool, features_aspp], dim=1)
features = self.conv_projection(features)
features = self.dropout(features)
features = self.classifier(features)
return features
````

class_skeleton:
````python
class MobileNetV2DeepLabV3Plus(nn.Module):
'''
The neural network from the paper "Encoder-Decoder with Atrous Separable Convolution for Semantic Image
Segmentation" https://huggingface.co/papers/1802.02611
'''
def __init__(self, config: MobileNetV2Config) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
````

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=33, AvgCountLineBlank=5, AvgCountLineCode=28, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.07, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=6, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=73, CountLineBlank=12, CountLineCode=57, CountLineCodeDecl=12, CountLineCodeExe=54, CountLineComment=4, CountStmt=20, CountStmtDecl=12, CountStmtExe=17, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
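A shape walk-through of the `forward` in row 3,902 shows why `conv_projection` is built with `in_channels=512`: the globally pooled branch and the ASPP branch each produce 256 channels before concatenation. A minimal sketch with plain torch ops; the input channel count 320 stands in for `apply_depth_multiplier(config, 320)`:

```python
import torch
from torch import nn

B, C, H, W = 2, 320, 32, 32
features = torch.randn(B, C, H, W)

pooled = nn.AdaptiveAvgPool2d(1)(features)         # (B, C, 1, 1): global context
pooled = nn.Conv2d(C, 256, kernel_size=1)(pooled)  # stand-in for conv_pool
pooled = nn.functional.interpolate(pooled, size=(H, W), mode="bilinear", align_corners=True)
aspp = nn.Conv2d(C, 256, kernel_size=1)(features)  # stand-in for conv_aspp
fused = torch.cat([pooled, aspp], dim=1)           # (B, 512, H, W) -> hence in_channels=512
print(fused.shape)                                 # torch.Size([2, 512, 32, 32])
```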

id: 3,903
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py
class_name: transformers.models.mobilenet_v2.modeling_mobilenet_v2.MobileNetV2ForImageClassification

human_written_code:
````python
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput
from ...utils import auto_docstring, logging
import torch
from typing import Optional, Union
from .configuration_mobilenet_v2 import MobileNetV2Config
from torch import nn
@auto_docstring(custom_intro='\n MobileNetV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class MobileNetV2ForImageClassification(MobileNetV2PreTrainedModel):
def __init__(self, config: MobileNetV2Config) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilenet_v2 = MobileNetV2Model(config)
last_hidden_size = self.mobilenet_v2.conv_1x1.convolution.out_channels
self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilenet_v2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(self.dropout(pooled_output))
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
````

class_skeleton:
````python
@auto_docstring(custom_intro='\n MobileNetV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class MobileNetV2ForImageClassification(MobileNetV2PreTrainedModel):
def __init__(self, config: MobileNetV2Config) -> None:
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
````

metrics: total_program_units=5, total_doc_str=1, AvgCountLine=34, AvgCountLineBlank=6, AvgCountLineCode=24, AvgCountLineComment=4, AvgCyclomatic=8, CommentToCodeRatio=0.14, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=76, CountLineBlank=12, CountLineCode=56, CountLineCodeDecl=21, CountLineCodeExe=40, CountLineComment=8, CountStmt=34, CountStmtDecl=14, CountStmtExe=31, MaxCyclomatic=13, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=15
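The head in row 3,903 is just dropout followed by a linear layer over the pooled features, with `nn.Identity()` when `num_labels == 0`. The shape contract is easy to verify in isolation; 1280 is an assumed last hidden size (it matches `depth_multiplier == 1.0`, not necessarily every config):

```python
import torch
from torch import nn

last_hidden_size, num_labels = 1280, 1000
pooled_output = torch.randn(4, last_hidden_size)  # (batch_size, hidden)

classifier = nn.Linear(last_hidden_size, num_labels) if num_labels > 0 else nn.Identity()
logits = classifier(nn.Dropout(p=0.2)(pooled_output))  # p stands in for classifier_dropout_prob
print(logits.shape)  # torch.Size([4, 1000])
```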

id: 3,904
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py
class_name: transformers.models.mobilenet_v2.modeling_mobilenet_v2.MobileNetV2ForSemanticSegmentation

human_written_code:
````python
from torch import nn
from typing import Optional, Union
import torch
from ...utils import auto_docstring, logging
from .configuration_mobilenet_v2 import MobileNetV2Config
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput
from torch.nn import CrossEntropyLoss
@auto_docstring(custom_intro='\n MobileNetV2 model with a semantic segmentation head on top, e.g. for Pascal VOC.\n ')
class MobileNetV2ForSemanticSegmentation(MobileNetV2PreTrainedModel):
def __init__(self, config: MobileNetV2Config) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilenet_v2 = MobileNetV2Model(config, add_pooling_layer=False)
self.segmentation_head = MobileNetV2DeepLabV3Plus(config)
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, MobileNetV2ForSemanticSegmentation
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
>>> model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # logits are of shape (batch_size, num_labels, height, width)
>>> logits = outputs.logits
```"""
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None and self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one')
outputs = self.mobilenet_v2(pixel_values, output_hidden_states=True, return_dict=return_dict)
encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
logits = self.segmentation_head(encoder_hidden_states[-1])
loss = None
if labels is not None:
upsampled_logits = nn.functional.interpolate(logits, size=labels.shape[-2:], mode='bilinear', align_corners=False)
loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
loss = loss_fct(upsampled_logits, labels)
if not return_dict:
if output_hidden_states:
output = (logits,) + outputs[1:]
else:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SemanticSegmenterOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None)
````

class_skeleton:
````python
@auto_docstring(custom_intro='\n MobileNetV2 model with a semantic segmentation head on top, e.g. for Pascal VOC.\n ')
class MobileNetV2ForSemanticSegmentation(MobileNetV2PreTrainedModel):
def __init__(self, config: MobileNetV2Config) -> None:
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, MobileNetV2ForSemanticSegmentation
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
>>> model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # logits are of shape (batch_size, num_labels, height, width)
>>> logits = outputs.logits
```'''
pass
````

metrics: total_program_units=5, total_doc_str=1, AvgCountLine=42, AvgCountLineBlank=9, AvgCountLineCode=23, AvgCountLineComment=12, AvgCyclomatic=6, CommentToCodeRatio=0.48, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=88, CountLineBlank=18, CountLineCode=48, CountLineCodeDecl=20, CountLineCodeExe=37, CountLineComment=23, CountStmt=26, CountStmtDecl=13, CountStmtExe=23, MaxCyclomatic=10, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=11
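Rows 3,900 and 3,904 compose: the processor's `post_process_semantic_segmentation` consumes the model output shown here. A sketch extending the docstring example above with the post-processing step (same checkpoint; note `target_sizes` wants (height, width), hence the reversed `image.size`):

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV2ForSemanticSegmentation

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

with torch.no_grad():
    outputs = model(**processor(images=image, return_tensors="pt"))

# Upsample logits to the original resolution and take the per-pixel argmax.
seg = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(seg.shape)  # (height, width) tensor of class ids
```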

id: 3,905
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py
class_name: transformers.models.mobilenet_v2.modeling_mobilenet_v2.MobileNetV2InvertedResidual

human_written_code:
````python
from .configuration_mobilenet_v2 import MobileNetV2Config
from torch import nn
import torch
class MobileNetV2InvertedResidual(nn.Module):
def __init__(self, config: MobileNetV2Config, in_channels: int, out_channels: int, stride: int, dilation: int=1) -> None:
super().__init__()
expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), config.depth_divisible_by, config.min_depth)
if stride not in [1, 2]:
raise ValueError(f'Invalid stride {stride}.')
self.use_residual = stride == 1 and in_channels == out_channels
self.expand_1x1 = MobileNetV2ConvLayer(config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1)
self.conv_3x3 = MobileNetV2ConvLayer(config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=3, stride=stride, groups=expanded_channels, dilation=dilation)
self.reduce_1x1 = MobileNetV2ConvLayer(config, in_channels=expanded_channels, out_channels=out_channels, kernel_size=1, use_activation=False)
def forward(self, features: torch.Tensor) -> torch.Tensor:
residual = features
features = self.expand_1x1(features)
features = self.conv_3x3(features)
features = self.reduce_1x1(features)
return residual + features if self.use_residual else features
````

class_skeleton:
````python
class MobileNetV2InvertedResidual(nn.Module):
def __init__(self, config: MobileNetV2Config, in_channels: int, out_channels: int, stride: int, dilation: int=1) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
````

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=22, AvgCountLineBlank=4, AvgCountLineCode=18, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=45, CountLineBlank=9, CountLineCode=36, CountLineCodeDecl=11, CountLineCodeExe=31, CountLineComment=0, CountStmt=16, CountStmtDecl=9, CountStmtExe=13, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4
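The identity shortcut in row 3,905 is only added when shapes line up; `use_residual = stride == 1 and in_channels == out_channels` encodes exactly that. A tiny check of the condition:

```python
# Residual addition requires the block to neither downsample (stride == 1)
# nor change the channel count, mirroring MobileNetV2InvertedResidual.
for in_channels, out_channels, stride in [(16, 16, 1), (16, 24, 1), (16, 16, 2)]:
    use_residual = stride == 1 and in_channels == out_channels
    print(in_channels, out_channels, stride, "->", "residual" if use_residual else "plain")
```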

id: 3,906
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py
class_name: transformers.models.mobilenet_v2.modeling_mobilenet_v2.MobileNetV2Model

human_written_code:
````python
from ...utils import auto_docstring, logging
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput
from .configuration_mobilenet_v2 import MobileNetV2Config
import torch
from torch import nn
@auto_docstring
class MobileNetV2Model(MobileNetV2PreTrainedModel):
def __init__(self, config: MobileNetV2Config, add_pooling_layer: bool=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
channels = [16, 24, 24, 32, 32, 32, 64, 64, 64, 64, 96, 96, 96, 160, 160, 160, 320]
channels = [apply_depth_multiplier(config, x) for x in channels]
strides = [2, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1]
self.conv_stem = MobileNetV2Stem(config, in_channels=config.num_channels, expanded_channels=apply_depth_multiplier(config, 32), out_channels=channels[0])
current_stride = 2
dilation = 1
self.layer = nn.ModuleList()
for i in range(16):
if current_stride == config.output_stride:
layer_stride = 1
layer_dilation = dilation
dilation *= strides[i]
else:
layer_stride = strides[i]
layer_dilation = 1
current_stride *= layer_stride
self.layer.append(MobileNetV2InvertedResidual(config, in_channels=channels[i], out_channels=channels[i + 1], stride=layer_stride, dilation=layer_dilation))
if config.finegrained_output and config.depth_multiplier < 1.0:
output_channels = 1280
else:
output_channels = apply_depth_multiplier(config, 1280)
self.conv_1x1 = MobileNetV2ConvLayer(config, in_channels=channels[-1], out_channels=output_channels, kernel_size=1)
self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
self.post_init()
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
hidden_states = self.conv_stem(pixel_values)
all_hidden_states = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
last_hidden_state = self.conv_1x1(hidden_states)
if self.pooler is not None:
pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
else:
pooled_output = None
if not return_dict:
return tuple((v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None))
return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states)
````

class_skeleton:
````python
@auto_docstring
class MobileNetV2Model(MobileNetV2PreTrainedModel):
def __init__(self, config: MobileNetV2Config, add_pooling_layer: bool=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def _prune_heads(self, heads_to_prune):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
pass
````

metrics: total_program_units=6, total_doc_str=1, AvgCountLine=33, AvgCountLineBlank=6, AvgCountLineCode=26, AvgCountLineComment=2, AvgCyclomatic=5, CommentToCodeRatio=0.07, CountClassBase=1, CountClassCoupled=13, CountClassCoupledModified=5, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=5, CountDeclMethod=3, CountDeclMethodAll=4, CountLine=111, CountLineBlank=21, CountLineCode=86, CountLineCodeDecl=28, CountLineCodeExe=69, CountLineComment=6, CountStmt=46, CountStmtDecl=22, CountStmtExe=42, MaxCyclomatic=9, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=15
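The stride-to-dilation swap in `MobileNetV2Model.__init__` (row 3,906) is the standard trick for capping the network at a fixed `output_stride`: once the running stride hits the target, later stages keep stride 1 and fold the skipped stride into the dilation. Tracing the loop with the stride list from the code makes this concrete; `output_stride = 16` is an assumed config value (with a larger default the branch never fires):

```python
strides = [2, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1]
output_stride = 16               # assumed config.output_stride
current_stride, dilation = 2, 1  # the stem has already downsampled once

for i, s in enumerate(strides):
    if current_stride == output_stride:
        layer_stride, layer_dilation = 1, dilation
        dilation *= s            # the stride we skipped becomes extra dilation
    else:
        layer_stride, layer_dilation = s, 1
        current_stride *= layer_stride
    print(f"layer {i:2d}: stride={layer_stride} dilation={layer_dilation}")
# Layers past the cap run at stride 1; dilation grows to 2 after the skipped stride-2 layer.
```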

id: 3,907
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py
class_name: transformers.models.mobilenet_v2.modeling_mobilenet_v2.MobileNetV2PreTrainedModel

human_written_code:
````python
from typing import Optional, Union
from ...utils import auto_docstring, logging
from ...modeling_utils import PreTrainedModel
from .configuration_mobilenet_v2 import MobileNetV2Config
from torch import nn
@auto_docstring
class MobileNetV2PreTrainedModel(PreTrainedModel):
config: MobileNetV2Config
base_model_prefix = 'mobilenet_v2'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = False
_no_split_modules = []
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.BatchNorm2d):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
````

class_skeleton:
````python
@auto_docstring
class MobileNetV2PreTrainedModel(PreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
'''Initialize the weights'''
pass
````

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=9, AvgCountLineBlank=0, AvgCountLineCode=8, AvgCountLineComment=1, AvgCyclomatic=4, CommentToCodeRatio=0.33, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=3, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=22, CountLineBlank=2, CountLineCode=15, CountLineCodeDecl=8, CountLineCodeExe=13, CountLineComment=5, CountStmt=14, CountStmtDecl=8, CountStmtExe=12, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=4
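`_init_weights` in row 3,907 is not called directly; `post_init()` dispatches it across the module tree. A stripped-down sketch of that dispatch using plain `nn.Module.apply`, with 0.02 standing in for `config.initializer_range`:

```python
import torch
from torch import nn

def init_weights(module: nn.Module) -> None:
    # Mirrors MobileNetV2PreTrainedModel._init_weights above.
    if isinstance(module, (nn.Linear, nn.Conv2d)):
        module.weight.data.normal_(mean=0.0, std=0.02)  # config.initializer_range stand-in
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.BatchNorm2d):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

net = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.BatchNorm2d(8))
net.apply(init_weights)             # recurses over every submodule
print(net[1].weight.data.unique())  # tensor([1.])
```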

id: 3,908
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py
class_name: transformers.models.mobilenet_v2.modeling_mobilenet_v2.MobileNetV2Stem

human_written_code:
````python
from .configuration_mobilenet_v2 import MobileNetV2Config
from torch import nn
import torch
class MobileNetV2Stem(nn.Module):
def __init__(self, config: MobileNetV2Config, in_channels: int, expanded_channels: int, out_channels: int) -> None:
super().__init__()
self.first_conv = MobileNetV2ConvLayer(config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=3, stride=2)
if config.first_layer_is_expansion:
self.expand_1x1 = None
else:
self.expand_1x1 = MobileNetV2ConvLayer(config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=1)
self.conv_3x3 = MobileNetV2ConvLayer(config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=3, stride=1, groups=expanded_channels)
self.reduce_1x1 = MobileNetV2ConvLayer(config, in_channels=expanded_channels, out_channels=out_channels, kernel_size=1, use_activation=False)
def forward(self, features: torch.Tensor) -> torch.Tensor:
features = self.first_conv(features)
if self.expand_1x1 is not None:
features = self.expand_1x1(features)
features = self.conv_3x3(features)
features = self.reduce_1x1(features)
return features
````

class_skeleton:
````python
class MobileNetV2Stem(nn.Module):
def __init__(self, config: MobileNetV2Config, in_channels: int, expanded_channels: int, out_channels: int) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
````

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=22, AvgCountLineBlank=2, AvgCountLineCode=19, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.05, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=45, CountLineBlank=5, CountLineCode=38, CountLineCodeDecl=7, CountLineCodeExe=35, CountLineComment=2, CountStmt=16, CountStmtDecl=7, CountStmtExe=13, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4

id: 3,909
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/configuration_mobilevit.py
class_name: transformers.models.mobilevit.configuration_mobilevit.MobileViTConfig

human_written_code:
````python
from ...configuration_utils import PretrainedConfig
class MobileViTConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MobileViTModel`]. It is used to instantiate a
MobileViT model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MobileViT
[apple/mobilevit-small](https://huggingface.co/apple/mobilevit-small) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 256):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 2):
The size (resolution) of each patch.
hidden_sizes (`list[int]`, *optional*, defaults to `[144, 192, 240]`):
Dimensionality (hidden size) of the Transformer encoders at each stage.
neck_hidden_sizes (`list[int]`, *optional*, defaults to `[16, 32, 64, 96, 128, 160, 640]`):
The number of channels for the feature maps of the backbone.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`float`, *optional*, defaults to 2.0):
The ratio of the number of channels in the output of the MLP to the number of channels in the input.
expand_ratio (`float`, *optional*, defaults to 4.0):
Expansion factor for the MobileNetv2 layers.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
conv_kernel_size (`int`, *optional*, defaults to 3):
The size of the convolutional kernel in the MobileViT layer.
output_stride (`int`, *optional*, defaults to 32):
The ratio of the spatial resolution of the output to the resolution of the input image.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the Transformer encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for attached classifiers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
aspp_out_channels (`int`, *optional*, defaults to 256):
Number of output channels used in the ASPP layer for semantic segmentation.
atrous_rates (`list[int]`, *optional*, defaults to `[6, 12, 18]`):
Dilation (atrous) factors used in the ASPP layer for semantic segmentation.
aspp_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the ASPP layer for semantic segmentation.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
Example:
```python
>>> from transformers import MobileViTConfig, MobileViTModel
>>> # Initializing a mobilevit-small style configuration
>>> configuration = MobileViTConfig()
>>> # Initializing a model from the mobilevit-small style configuration
>>> model = MobileViTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'mobilevit'
def __init__(self, num_channels=3, image_size=256, patch_size=2, hidden_sizes=[144, 192, 240], neck_hidden_sizes=[16, 32, 64, 96, 128, 160, 640], num_attention_heads=4, mlp_ratio=2.0, expand_ratio=4.0, hidden_act='silu', conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-05, qkv_bias=True, aspp_out_channels=256, atrous_rates=[6, 12, 18], aspp_dropout_prob=0.1, semantic_loss_ignore_index=255, **kwargs):
super().__init__(**kwargs)
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.hidden_sizes = hidden_sizes
self.neck_hidden_sizes = neck_hidden_sizes
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.expand_ratio = expand_ratio
self.hidden_act = hidden_act
self.conv_kernel_size = conv_kernel_size
self.output_stride = output_stride
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.aspp_out_channels = aspp_out_channels
self.atrous_rates = atrous_rates
self.aspp_dropout_prob = aspp_dropout_prob
self.semantic_loss_ignore_index = semantic_loss_ignore_index
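A minimal hedged sketch of how the resolution-related arguments interact, building on the docstring example above (`output_stride=8` is an assumption chosen to illustrate the dense-prediction setting):

```python
from transformers import MobileViTConfig, MobileViTModel

# Sketch: a dense-prediction-friendly configuration. With output_stride=8 the
# encoder replaces its last two strided stages with dilated convolutions, so
# the final feature map is image_size / 8 instead of the default image_size / 32.
configuration = MobileViTConfig(image_size=512, output_stride=8)
model = MobileViTModel(configuration)
```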
|
class MobileViTConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MobileViTModel`]. It is used to instantiate a
MobileViT model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MobileViT
[apple/mobilevit-small](https://huggingface.co/apple/mobilevit-small) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 256):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 2):
The size (resolution) of each patch.
hidden_sizes (`list[int]`, *optional*, defaults to `[144, 192, 240]`):
Dimensionality (hidden size) of the Transformer encoders at each stage.
neck_hidden_sizes (`list[int]`, *optional*, defaults to `[16, 32, 64, 96, 128, 160, 640]`):
The number of channels for the feature maps of the backbone.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`float`, *optional*, defaults to 2.0):
The ratio of the number of channels in the output of the MLP to the number of channels in the input.
expand_ratio (`float`, *optional*, defaults to 4.0):
Expansion factor for the MobileNetv2 layers.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
conv_kernel_size (`int`, *optional*, defaults to 3):
The size of the convolutional kernel in the MobileViT layer.
output_stride (`int`, *optional*, defaults to 32):
The ratio of the spatial resolution of the output to the resolution of the input image.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the Transformer encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for attached classifiers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
aspp_out_channels (`int`, *optional*, defaults to 256):
Number of output channels used in the ASPP layer for semantic segmentation.
atrous_rates (`list[int]`, *optional*, defaults to `[6, 12, 18]`):
Dilation (atrous) factors used in the ASPP layer for semantic segmentation.
aspp_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the ASPP layer for semantic segmentation.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
Example:
```python
>>> from transformers import MobileViTConfig, MobileViTModel
>>> # Initializing a mobilevit-small style configuration
>>> configuration = MobileViTConfig()
>>> # Initializing a model from the mobilevit-small style configuration
>>> model = MobileViTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, num_channels=3, image_size=256, patch_size=2, hidden_sizes=[144, 192, 240], neck_hidden_sizes=[16, 32, 64, 96, 128, 160, 640], num_attention_heads=4, mlp_ratio=2.0, expand_ratio=4.0, hidden_act='silu', conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-05, qkv_bias=True, aspp_out_channels=256, atrous_rates=[6, 12, 18], aspp_dropout_prob=0.1, semantic_loss_ignore_index=255, **kwargs):
pass
| 2
| 1
| 50
| 2
| 47
| 1
| 1
| 1.24
| 1
| 1
| 0
| 0
| 1
| 21
| 1
| 1
| 121
| 11
| 49
| 48
| 23
| 61
| 25
| 24
| 23
| 1
| 1
| 0
| 1
|
3,910
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/configuration_mobilevit.py
|
transformers.models.mobilevit.configuration_mobilevit.MobileViTOnnxConfig
|
from packaging import version
from collections.abc import Mapping
from ...onnx import OnnxConfig
from collections import OrderedDict
class MobileViTOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.11')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'image-classification':
return OrderedDict([('logits', {0: 'batch'})])
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])
@property
def atol_for_validation(self) -> float:
return 0.0001
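A minimal sketch of the dynamic-axis mappings these properties yield, assuming a default `MobileViTConfig` (the base `OnnxConfig` constructor takes the model config plus a task string):

```python
from transformers import MobileViTConfig

config = MobileViTConfig()
onnx_config = MobileViTOnnxConfig(config, task="image-classification")
print(onnx_config.inputs)
# OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
print(onnx_config.outputs)              # OrderedDict([('logits', {0: 'batch'})])
print(onnx_config.atol_for_validation)  # 0.0001
```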
|
class MobileViTOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
| 7
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 4
| 0
| 0
| 3
| 0
| 3
| 3
| 17
| 3
| 14
| 8
| 7
| 0
| 10
| 5
| 6
| 2
| 1
| 1
| 4
|
3,911
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/feature_extraction_mobilevit.py
|
transformers.models.mobilevit.feature_extraction_mobilevit.MobileViTFeatureExtractor
|
from .image_processing_mobilevit import MobileViTImageProcessor
import warnings
from ...utils.import_utils import requires
@requires(backends=('vision',))
class MobileViTFeatureExtractor(MobileViTImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use MobileViTImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
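A small sketch of the deprecation path: the alias still constructs a working processor but emits a `FutureWarning` pointing at `MobileViTImageProcessor`:

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = MobileViTFeatureExtractor()  # deprecated alias of MobileViTImageProcessor
assert any(issubclass(w.category, FutureWarning) for w in caught)
```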
|
@requires(backends=('vision',))
class MobileViTFeatureExtractor(MobileViTImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 30
| 8
| 0
| 8
| 2
| 6
| 0
| 4
| 2
| 2
| 1
| 4
| 0
| 1
|
3,912
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/image_processing_mobilevit.py
|
transformers.models.mobilevit.image_processing_mobilevit.MobileViTImageProcessor
|
from ...utils.import_utils import requires
import numpy as np
from ...image_transforms import flip_channel_order, get_resize_output_image_size, resize, to_channel_dimension_format
from typing import Optional, Union
from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...utils import TensorType, filter_out_non_signature_kwargs, is_torch_available, is_torch_tensor, is_vision_available, logging
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
if is_vision_available():
    import PIL
if is_torch_available():
    import torch
logger = logging.get_logger(__name__)
@requires(backends=('vision',))
class MobileViTImageProcessor(BaseImageProcessor):
"""
Constructs a MobileViT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
        size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
Controls the size of the output image after resizing. Can be overridden by the `size` parameter in the
`preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the
image is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in
the `preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
Desired output size `(size["height"], size["width"])` when applying center-cropping. Can be overridden by
the `crop_size` parameter in the `preprocess` method.
do_flip_channel_order (`bool`, *optional*, defaults to `True`):
Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
parameter in the `preprocess` method.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
`preprocess` method.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_flip_channel_order: bool=True, do_reduce_labels: bool=False, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'shortest_edge': 224}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {'height': 256, 'width': 256}
crop_size = get_size_dict(crop_size, param_name='crop_size')
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_flip_channel_order = do_flip_channel_order
self.do_reduce_labels = do_reduce_labels
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
default_to_square = True
if 'shortest_edge' in size:
size = size['shortest_edge']
default_to_square = False
elif 'height' in size and 'width' in size:
size = (size['height'], size['width'])
else:
raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Flip the color channels from RGB to BGR or vice versa.
Args:
image (`np.ndarray`):
The image, represented as a numpy array.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
return flip_channel_order(image, data_format=data_format, input_data_format=input_data_format)
def reduce_label(self, label: ImageInput) -> np.ndarray:
label = to_numpy_array(label)
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
return label
def __call__(self, images, segmentation_maps=None, **kwargs):
"""
Preprocesses a batch of images and optionally segmentation maps.
Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
passed in as positional arguments.
"""
return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
def _preprocess(self, image: ImageInput, do_reduce_labels: bool, do_resize: bool, do_rescale: bool, do_center_crop: bool, do_flip_channel_order: bool, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, rescale_factor: Optional[float]=None, crop_size: Optional[dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
if do_reduce_labels:
image = self.reduce_label(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_flip_channel_order:
image = self.flip_channel_order(image, input_data_format=input_data_format)
return image
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_flip_channel_order: Optional[bool]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single image."""
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(image=image, do_reduce_labels=False, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_center_crop=do_center_crop, crop_size=crop_size, do_flip_channel_order=do_flip_channel_order, input_data_format=input_data_format)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_mask(self, segmentation_map: ImageInput, do_reduce_labels: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single mask."""
segmentation_map = to_numpy_array(segmentation_map)
if segmentation_map.ndim == 2:
added_channel_dim = True
segmentation_map = segmentation_map[None, ...]
input_data_format = ChannelDimension.FIRST
else:
added_channel_dim = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
segmentation_map = self._preprocess(image=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, size=size, resample=PILImageResampling.NEAREST, do_rescale=False, do_center_crop=do_center_crop, crop_size=crop_size, do_flip_channel_order=False, input_data_format=input_data_format)
if added_channel_dim:
segmentation_map = segmentation_map.squeeze(0)
segmentation_map = segmentation_map.astype(np.int64)
return segmentation_map
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_flip_channel_order: Optional[bool]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
segmentation_maps (`ImageInput`, *optional*):
Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image by the specified rescale factor.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop if `do_center_crop` is set to `True`.
do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
Whether to flip the channel order of the image.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
do_flip_channel_order = do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name='crop_size')
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
if segmentation_maps is not None and (not valid_images(segmentation_maps)):
raise ValueError('Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
images = [self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_center_crop=do_center_crop, crop_size=crop_size, do_flip_channel_order=do_flip_channel_order, data_format=data_format, input_data_format=input_data_format) for img in images]
data = {'pixel_values': images}
if segmentation_maps is not None:
segmentation_maps = [self._preprocess_mask(segmentation_map=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, size=size, do_center_crop=do_center_crop, crop_size=crop_size, input_data_format=input_data_format) for segmentation_map in segmentation_maps]
data['labels'] = segmentation_maps
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]]=None):
"""
Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`MobileViTForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
"""
logits = outputs.logits
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
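A hedged end-to-end sketch of the default pipeline (resize shortest edge to 224, center crop to 256×256, rescale by 1/255, flip RGB to BGR), plus the `reduce_label` remapping used for ADE20k-style segmentation maps:

```python
import numpy as np
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor()
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 256, 256)

# reduce_label: background id 0 becomes the ignore index 255; all other ids shift down by one.
label = np.array([[0, 1, 5], [7, 0, 1]], dtype=np.uint8)
print(processor.reduce_label(label))  # [[255   0   4]
                                      #  [  6 255   0]]
```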
|
@requires(backends=('vision',))
class MobileViTImageProcessor(BaseImageProcessor):
'''
Constructs a MobileViT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
        size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
Controls the size of the output image after resizing. Can be overridden by the `size` parameter in the
`preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the
image is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in
the `preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
Desired output size `(size["height"], size["width"])` when applying center-cropping. Can be overridden by
the `crop_size` parameter in the `preprocess` method.
do_flip_channel_order (`bool`, *optional*, defaults to `True`):
Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
parameter in the `preprocess` method.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
`preprocess` method.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_flip_channel_order: bool=True, do_reduce_labels: bool=False, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Flip the color channels from RGB to BGR or vice versa.
Args:
image (`np.ndarray`):
The image, represented as a numpy array.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def reduce_label(self, label: ImageInput) -> np.ndarray:
pass
def __call__(self, images, segmentation_maps=None, **kwargs):
'''
Preprocesses a batch of images and optionally segmentation maps.
Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
passed in as positional arguments.
'''
pass
def _preprocess(self, image: ImageInput, do_reduce_labels: bool, do_resize: bool, do_rescale: bool, do_center_crop: bool, do_flip_channel_order: bool, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, rescale_factor: Optional[float]=None, crop_size: Optional[dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
pass
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_flip_channel_order: Optional[bool]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single image.'''
pass
def _preprocess_mask(self, segmentation_map: ImageInput, do_reduce_labels: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single mask.'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_flip_channel_order: Optional[bool]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
segmentation_maps (`ImageInput`, *optional*):
Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image by the specified rescale factor.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop if `do_center_crop` is set to `True`.
do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
Whether to flip the channel order of the image.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]]=None):
'''
Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`MobileViTForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
'''
pass
| 13
| 8
| 43
| 4
| 29
| 10
| 4
| 0.48
| 1
| 9
| 2
| 1
| 9
| 8
| 9
| 29
| 429
| 44
| 261
| 101
| 178
| 124
| 103
| 28
| 93
| 13
| 3
| 2
| 38
|
3,913
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTASPP
|
import torch
from .configuration_mobilevit import MobileViTConfig
from torch import nn
class MobileViTASPP(nn.Module):
"""
ASPP module defined in DeepLab papers: https://huggingface.co/papers/1606.00915, https://huggingface.co/papers/1706.05587
"""
def __init__(self, config: MobileViTConfig) -> None:
super().__init__()
in_channels = config.neck_hidden_sizes[-2]
out_channels = config.aspp_out_channels
if len(config.atrous_rates) != 3:
raise ValueError('Expected 3 values for atrous_rates')
self.convs = nn.ModuleList()
in_projection = MobileViTConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, use_activation='relu')
self.convs.append(in_projection)
self.convs.extend([MobileViTConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=3, dilation=rate, use_activation='relu') for rate in config.atrous_rates])
pool_layer = MobileViTASPPPooling(config, in_channels, out_channels)
self.convs.append(pool_layer)
self.project = MobileViTConvLayer(config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation='relu')
self.dropout = nn.Dropout(p=config.aspp_dropout_prob)
def forward(self, features: torch.Tensor) -> torch.Tensor:
pyramid = []
for conv in self.convs:
pyramid.append(conv(features))
pyramid = torch.cat(pyramid, dim=1)
pooled_features = self.project(pyramid)
pooled_features = self.dropout(pooled_features)
return pooled_features
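A shape-level sketch of the five parallel branches (one 1×1 projection, three dilated 3×3 convolutions, one global-pooling branch) being concatenated and projected back down; it assumes `MobileViTASPP` and its dependencies are importable from this module:

```python
import torch
from transformers import MobileViTConfig

config = MobileViTConfig()  # aspp_out_channels=256, atrous_rates=[6, 12, 18]
aspp = MobileViTASPP(config)
features = torch.randn(1, config.neck_hidden_sizes[-2], 32, 32)  # 160 input channels
out = aspp(features)  # 5 * 256 concatenated channels, projected back to 256
print(out.shape)      # torch.Size([1, 256, 32, 32])
```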
|
class MobileViTASPP(nn.Module):
'''
ASPP module defined in DeepLab papers: https://huggingface.co/papers/1606.00915, https://huggingface.co/papers/1706.05587
'''
def __init__(self, config: MobileViTConfig) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 26
| 5
| 21
| 0
| 2
| 0.07
| 1
| 6
| 3
| 0
| 2
| 3
| 2
| 12
| 57
| 11
| 43
| 13
| 40
| 3
| 23
| 13
| 20
| 2
| 1
| 1
| 4
|
3,914
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTASPPPooling
|
from torch import nn
import torch
from .configuration_mobilevit import MobileViTConfig
class MobileViTASPPPooling(nn.Module):
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int) -> None:
super().__init__()
self.global_pool = nn.AdaptiveAvgPool2d(output_size=1)
self.conv_1x1 = MobileViTConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, use_normalization=True, use_activation='relu')
def forward(self, features: torch.Tensor) -> torch.Tensor:
spatial_size = features.shape[-2:]
features = self.global_pool(features)
features = self.conv_1x1(features)
features = nn.functional.interpolate(features, size=spatial_size, mode='bilinear', align_corners=False)
return features
|
class MobileViTASPPPooling(nn.Module):
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 10
| 1
| 9
| 0
| 1
| 0
| 1
| 5
| 2
| 0
| 2
| 2
| 2
| 12
| 22
| 3
| 19
| 6
| 16
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
3,915
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTAttention
|
import torch
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from .configuration_mobilevit import MobileViTConfig
from torch import nn
class MobileViTAttention(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
super().__init__()
self.attention = MobileViTSelfAttention(config, hidden_size)
self.output = MobileViTSelfOutput(config, hidden_size)
self.pruned_heads = set()
def prune_heads(self, heads: set[int]) -> None:
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
self_outputs = self.attention(hidden_states)
attention_output = self.output(self_outputs)
return attention_output
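A small sketch of head pruning on this block: with the default 4 heads over a hidden size of 144 (head size 36), removing two heads halves `all_head_size`; it assumes the module's self-attention classes are in scope:

```python
from transformers import MobileViTConfig

config = MobileViTConfig()  # num_attention_heads=4
attention = MobileViTAttention(config, hidden_size=144)
attention.prune_heads({0, 2})
print(attention.attention.num_attention_heads)  # 2
print(attention.attention.all_head_size)        # 72 (= 2 remaining heads * head size 36)
```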
|
class MobileViTAttention(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
pass
def prune_heads(self, heads: set[int]) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 4
| 0
| 9
| 1
| 7
| 1
| 1
| 0.09
| 1
| 7
| 3
| 0
| 3
| 3
| 3
| 13
| 29
| 4
| 23
| 10
| 19
| 2
| 21
| 10
| 17
| 2
| 1
| 1
| 4
|
3,916
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTConvLayer
|
from typing import Optional, Union
from .configuration_mobilevit import MobileViTConfig
import torch
from torch import nn
from ...activations import ACT2FN
class MobileViTConvLayer(nn.Module):
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, kernel_size: int, stride: int=1, groups: int=1, bias: bool=False, dilation: int=1, use_normalization: bool=True, use_activation: Union[bool, str]=True) -> None:
super().__init__()
padding = int((kernel_size - 1) / 2) * dilation
if in_channels % groups != 0:
raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.')
if out_channels % groups != 0:
raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.')
self.convolution = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode='zeros')
if use_normalization:
self.normalization = nn.BatchNorm2d(num_features=out_channels, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
else:
self.normalization = None
if use_activation:
if isinstance(use_activation, str):
self.activation = ACT2FN[use_activation]
elif isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
else:
self.activation = None
def forward(self, features: torch.Tensor) -> torch.Tensor:
features = self.convolution(features)
if self.normalization is not None:
features = self.normalization(features)
if self.activation is not None:
features = self.activation(features)
return features
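A quick sketch of the padding arithmetic: `padding = (kernel_size - 1) // 2 * dilation`, so stride-1 convolutions preserve spatial size even when dilated:

```python
import torch
from transformers import MobileViTConfig

config = MobileViTConfig()
conv = MobileViTConvLayer(config, in_channels=3, out_channels=16, kernel_size=3, dilation=2)
x = torch.randn(1, 3, 64, 64)
print(conv(x).shape)  # torch.Size([1, 16, 64, 64]); padding = ((3 - 1) // 2) * 2 = 2
```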
|
class MobileViTConvLayer(nn.Module):
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, kernel_size: int, stride: int=1, groups: int=1, bias: bool=False, dilation: int=1, use_normalization: bool=True, use_activation: Union[bool, str]=True) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 30
| 2
| 28
| 0
| 5
| 0
| 1
| 7
| 1
| 0
| 2
| 3
| 2
| 12
| 62
| 5
| 57
| 19
| 42
| 0
| 25
| 7
| 22
| 7
| 1
| 2
| 10
|
3,917
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTDeepLabV3
|
from .configuration_mobilevit import MobileViTConfig
from torch import nn
import torch
class MobileViTDeepLabV3(nn.Module):
"""
DeepLabv3 architecture: https://huggingface.co/papers/1706.05587
"""
def __init__(self, config: MobileViTConfig) -> None:
super().__init__()
self.aspp = MobileViTASPP(config)
self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
self.classifier = MobileViTConvLayer(config, in_channels=config.aspp_out_channels, out_channels=config.num_labels, kernel_size=1, use_normalization=False, use_activation=False, bias=True)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
features = self.aspp(hidden_states[-1])
features = self.dropout(features)
features = self.classifier(features)
return features
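A shape-level sketch of the head: only the last hidden state feeds the ASPP, and the 1×1 classifier maps to `num_labels` channels (21 below is a hypothetical Pascal-VOC-style label count):

```python
import torch
from transformers import MobileViTConfig

config = MobileViTConfig(num_labels=21)
head = MobileViTDeepLabV3(config)
hidden_states = [torch.randn(1, c, 32, 32) for c in (96, 128, 160)]  # encoder feature maps
logits = head(hidden_states)  # only hidden_states[-1] (160 channels) is consumed
print(logits.shape)           # torch.Size([1, 21, 32, 32])
```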
|
class MobileViTDeepLabV3(nn.Module):
'''
DeepLabv3 architecture: https://huggingface.co/papers/1706.05587
'''
def __init__(self, config: MobileViTConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 10
| 1
| 9
| 0
| 1
| 0.16
| 1
| 5
| 3
| 0
| 2
| 3
| 2
| 12
| 26
| 4
| 19
| 7
| 16
| 3
| 11
| 7
| 8
| 1
| 1
| 0
| 2
|
3,918
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTEncoder
|
from .configuration_mobilevit import MobileViTConfig
from torch import nn
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput
import torch
from typing import Optional, Union
class MobileViTEncoder(nn.Module):
def __init__(self, config: MobileViTConfig) -> None:
super().__init__()
self.config = config
self.layer = nn.ModuleList()
self.gradient_checkpointing = False
dilate_layer_4 = dilate_layer_5 = False
if config.output_stride == 8:
dilate_layer_4 = True
dilate_layer_5 = True
elif config.output_stride == 16:
dilate_layer_5 = True
dilation = 1
layer_1 = MobileViTMobileNetLayer(config, in_channels=config.neck_hidden_sizes[0], out_channels=config.neck_hidden_sizes[1], stride=1, num_stages=1)
self.layer.append(layer_1)
layer_2 = MobileViTMobileNetLayer(config, in_channels=config.neck_hidden_sizes[1], out_channels=config.neck_hidden_sizes[2], stride=2, num_stages=3)
self.layer.append(layer_2)
layer_3 = MobileViTLayer(config, in_channels=config.neck_hidden_sizes[2], out_channels=config.neck_hidden_sizes[3], stride=2, hidden_size=config.hidden_sizes[0], num_stages=2)
self.layer.append(layer_3)
if dilate_layer_4:
dilation *= 2
layer_4 = MobileViTLayer(config, in_channels=config.neck_hidden_sizes[3], out_channels=config.neck_hidden_sizes[4], stride=2, hidden_size=config.hidden_sizes[1], num_stages=4, dilation=dilation)
self.layer.append(layer_4)
if dilate_layer_5:
dilation *= 2
layer_5 = MobileViTLayer(config, in_channels=config.neck_hidden_sizes[4], out_channels=config.neck_hidden_sizes[5], stride=2, hidden_size=config.hidden_sizes[2], num_stages=3, dilation=dilation)
self.layer.append(layer_5)
def forward(self, hidden_states: torch.Tensor, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutputWithNoAttention]:
all_hidden_states = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states] if v is not None))
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
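A hedged sketch of the `output_stride` switch: at stride 8 the last two stages dilate instead of downsampling, so the spatial size stops shrinking after the third stage (the random input below stands in for the 16-channel conv-stem output):

```python
import torch
from transformers import MobileViTConfig

config = MobileViTConfig(output_stride=8)
encoder = MobileViTEncoder(config)
x = torch.randn(1, config.neck_hidden_sizes[0], 128, 128)  # stand-in for the conv stem output
out = encoder(x, output_hidden_states=True)
print([h.shape[-1] for h in out.hidden_states])  # [128, 64, 32, 32, 32]
```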
|
class MobileViTEncoder(nn.Module):
def __init__(self, config: MobileViTConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutputWithNoAttention]:
pass
| 3
| 0
| 49
| 7
| 41
| 1
| 6
| 0.02
| 1
| 9
| 4
| 0
| 2
| 3
| 2
| 12
| 99
| 15
| 82
| 20
| 74
| 2
| 37
| 15
| 34
| 6
| 1
| 2
| 11
|
3,919
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTForImageClassification
|
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput
from torch import nn
from ...utils import auto_docstring, logging, torch_int
from .configuration_mobilevit import MobileViTConfig
import torch
@auto_docstring(custom_intro='\n MobileViT model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class MobileViTForImageClassification(MobileViTPreTrainedModel):
def __init__(self, config: MobileViTConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilevit = MobileViTModel(config)
self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
self.classifier = nn.Linear(config.neck_hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilevit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(self.dropout(pooled_output))
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
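A hedged usage sketch in the same style as the segmentation docstring example below, using the `apple/mobilevit-small` checkpoint:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileViTForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
image_processor = AutoImageProcessor.from_pretrained("apple/mobilevit-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-small")
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```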
|
@auto_docstring(custom_intro='\n MobileViT model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class MobileViTForImageClassification(MobileViTPreTrainedModel):
def __init__(self, config: MobileViTConfig) -> None:
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5
| 1
| 34
| 5
| 25
| 4
| 8
| 0.14
| 1
| 8
| 3
| 0
| 2
| 4
| 2
| 3
| 76
| 11
| 57
| 20
| 41
| 8
| 33
| 13
| 30
| 13
| 2
| 3
| 15
|
3,920
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation
|
from .configuration_mobilevit import MobileViTConfig
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput
from torch import nn
from torch.nn import CrossEntropyLoss
import torch
from ...utils import auto_docstring, logging, torch_int
@auto_docstring(custom_intro='\n MobileViT model with a semantic segmentation head on top, e.g. for Pascal VOC.\n ')
class MobileViTForSemanticSegmentation(MobileViTPreTrainedModel):
def __init__(self, config: MobileViTConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilevit = MobileViTModel(config, expand_output=False)
self.segmentation_head = MobileViTDeepLabV3(config)
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, MobileViTForSemanticSegmentation
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
>>> model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # logits are of shape (batch_size, num_labels, height, width)
>>> logits = outputs.logits
```"""
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None and self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one')
outputs = self.mobilevit(pixel_values, output_hidden_states=True, return_dict=return_dict)
encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
logits = self.segmentation_head(encoder_hidden_states)
loss = None
if labels is not None:
upsampled_logits = nn.functional.interpolate(logits, size=labels.shape[-2:], mode='bilinear', align_corners=False)
loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
loss = loss_fct(upsampled_logits, labels)
if not return_dict:
if output_hidden_states:
output = (logits,) + outputs[1:]
else:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SemanticSegmenterOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None)
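Continuing the docstring example above, a hedged sketch of mapping the logits back to per-pixel class ids at the input resolution with the image processor helper:

```python
# image_processor, outputs, and image come from the docstring example above.
maps = image_processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]  # PIL reports (width, height); the helper expects (height, width)
)
print(maps[0].shape)  # torch.Size([480, 640]): one semantic class id per pixel
```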
|
@auto_docstring(custom_intro='\n MobileViT model with a semantic segmentation head on top, e.g. for Pascal VOC.\n ')
class MobileViTForSemanticSegmentation(MobileViTPreTrainedModel):
def __init__(self, config: MobileViTConfig) -> None:
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, MobileViTForSemanticSegmentation
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
>>> model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # logits are of shape (batch_size, num_labels, height, width)
>>> logits = outputs.logits
```'''
pass
| 5
| 1
| 43
| 9
| 23
| 12
| 6
| 0.5
| 1
| 9
| 4
| 0
| 2
| 3
| 2
| 3
| 89
| 18
| 48
| 20
| 37
| 24
| 26
| 13
| 23
| 10
| 2
| 2
| 11
|
3,921
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTIntermediate
|
import torch
from .configuration_mobilevit import MobileViTConfig
from torch import nn
from ...activations import ACT2FN
class MobileViTIntermediate(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
super().__init__()
self.dense = nn.Linear(hidden_size, intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class MobileViTIntermediate(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 5 | 1 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3
|
3,922
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTInvertedResidual
|
from torch import nn
from .configuration_mobilevit import MobileViTConfig
import torch
class MobileViTInvertedResidual(nn.Module):
"""
Inverted residual block (MobileNetv2): https://huggingface.co/papers/1801.04381
"""
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, dilation: int=1) -> None:
super().__init__()
expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)
if stride not in [1, 2]:
raise ValueError(f'Invalid stride {stride}.')
self.use_residual = stride == 1 and in_channels == out_channels
self.expand_1x1 = MobileViTConvLayer(config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1)
self.conv_3x3 = MobileViTConvLayer(config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=3, stride=stride, groups=expanded_channels, dilation=dilation)
self.reduce_1x1 = MobileViTConvLayer(config, in_channels=expanded_channels, out_channels=out_channels, kernel_size=1, use_activation=False)
def forward(self, features: torch.Tensor) -> torch.Tensor:
residual = features
features = self.expand_1x1(features)
features = self.conv_3x3(features)
features = self.reduce_1x1(features)
return residual + features if self.use_residual else features
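The skip connection above is only valid when the block neither downsamples nor changes the channel count, which is exactly what `use_residual` encodes. A minimal standalone sketch of that gating, written against plain `torch.nn` (the class and argument names here are illustrative, not the transformers API; normalization and activations are omitted to isolate the residual logic):

```python
import torch
from torch import nn


class TinyInvertedResidual(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int, expand_ratio: float = 2.0):
        super().__init__()
        expanded = int(round(in_channels * expand_ratio))
        # Residual only when spatial size and channel count are preserved.
        self.use_residual = stride == 1 and in_channels == out_channels
        self.block = nn.Sequential(
            nn.Conv2d(in_channels, expanded, kernel_size=1, bias=False),   # expand
            nn.Conv2d(expanded, expanded, kernel_size=3, stride=stride,
                      padding=1, groups=expanded, bias=False),             # depthwise
            nn.Conv2d(expanded, out_channels, kernel_size=1, bias=False),  # reduce
        )

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        out = self.block(features)
        return features + out if self.use_residual else out


x = torch.randn(1, 16, 32, 32)
print(TinyInvertedResidual(16, 16, stride=1)(x).shape)  # residual path: (1, 16, 32, 32)
print(TinyInvertedResidual(16, 32, stride=2)(x).shape)  # no residual:   (1, 32, 16, 16)
```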
|
class MobileViTInvertedResidual(nn.Module):
'''
Inverted residual block (MobileNetv2): https://huggingface.co/papers/1801.04381
'''
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, dilation: int=1) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3 | 1 | 20 | 4 | 17 | 0 | 2 | 0.09 | 1 | 6 | 2 | 0 | 2 | 4 | 2 | 12 | 46 | 9 | 34 | 11 | 29 | 3 | 16 | 9 | 13 | 2 | 1 | 1 | 4
|
3,923
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTLayer
|
from ...utils import auto_docstring, logging, torch_int
from .configuration_mobilevit import MobileViTConfig
from torch import nn
import torch
from ...modeling_layers import GradientCheckpointingLayer
import math
class MobileViTLayer(GradientCheckpointingLayer):
"""
MobileViT block: https://huggingface.co/papers/2110.02178
"""
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, hidden_size: int, num_stages: int, dilation: int=1) -> None:
super().__init__()
self.patch_width = config.patch_size
self.patch_height = config.patch_size
if stride == 2:
self.downsampling_layer = MobileViTInvertedResidual(config, in_channels=in_channels, out_channels=out_channels, stride=stride if dilation == 1 else 1, dilation=dilation // 2 if dilation > 1 else 1)
in_channels = out_channels
else:
self.downsampling_layer = None
self.conv_kxk = MobileViTConvLayer(config, in_channels=in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size)
self.conv_1x1 = MobileViTConvLayer(config, in_channels=in_channels, out_channels=hidden_size, kernel_size=1, use_normalization=False, use_activation=False)
self.transformer = MobileViTTransformer(config, hidden_size=hidden_size, num_stages=num_stages)
self.layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
self.conv_projection = MobileViTConvLayer(config, in_channels=hidden_size, out_channels=in_channels, kernel_size=1)
self.fusion = MobileViTConvLayer(config, in_channels=2 * in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size)
def unfolding(self, features: torch.Tensor) -> tuple[torch.Tensor, dict]:
patch_width, patch_height = (self.patch_width, self.patch_height)
patch_area = int(patch_width * patch_height)
batch_size, channels, orig_height, orig_width = features.shape
new_height = torch_int(torch.ceil(orig_height / patch_height) * patch_height) if torch.jit.is_tracing() else int(math.ceil(orig_height / patch_height) * patch_height)
new_width = torch_int(torch.ceil(orig_width / patch_width) * patch_width) if torch.jit.is_tracing() else int(math.ceil(orig_width / patch_width) * patch_width)
interpolate = False
if new_width != orig_width or new_height != orig_height:
features = nn.functional.interpolate(features, size=(new_height, new_width), mode='bilinear', align_corners=False)
interpolate = True
num_patch_width = new_width // patch_width
num_patch_height = new_height // patch_height
num_patches = num_patch_height * num_patch_width
patches = features.reshape(batch_size * channels * num_patch_height, patch_height, num_patch_width, patch_width)
patches = patches.transpose(1, 2)
patches = patches.reshape(batch_size, channels, num_patches, patch_area)
patches = patches.transpose(1, 3)
patches = patches.reshape(batch_size * patch_area, num_patches, -1)
info_dict = {'orig_size': (orig_height, orig_width), 'batch_size': batch_size, 'channels': channels, 'interpolate': interpolate, 'num_patches': num_patches, 'num_patches_width': num_patch_width, 'num_patches_height': num_patch_height}
return (patches, info_dict)
def folding(self, patches: torch.Tensor, info_dict: dict) -> torch.Tensor:
patch_width, patch_height = (self.patch_width, self.patch_height)
patch_area = int(patch_width * patch_height)
batch_size = info_dict['batch_size']
channels = info_dict['channels']
num_patches = info_dict['num_patches']
num_patch_height = info_dict['num_patches_height']
num_patch_width = info_dict['num_patches_width']
features = patches.contiguous().view(batch_size, patch_area, num_patches, -1)
features = features.transpose(1, 3)
features = features.reshape(batch_size * channels * num_patch_height, num_patch_width, patch_height, patch_width)
features = features.transpose(1, 2)
features = features.reshape(batch_size, channels, num_patch_height * patch_height, num_patch_width * patch_width)
if info_dict['interpolate']:
features = nn.functional.interpolate(features, size=info_dict['orig_size'], mode='bilinear', align_corners=False)
return features
def forward(self, features: torch.Tensor) -> torch.Tensor:
if self.downsampling_layer:
features = self.downsampling_layer(features)
residual = features
features = self.conv_kxk(features)
features = self.conv_1x1(features)
patches, info_dict = self.unfolding(features)
patches = self.transformer(patches)
patches = self.layernorm(patches)
features = self.folding(patches, info_dict)
features = self.conv_projection(features)
features = self.fusion(torch.cat((residual, features), dim=1))
return features
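The `unfolding`/`folding` pair above is a pure reshape/transpose pipeline, so it is lossless whenever the spatial size is already a multiple of the patch size (the interpolation branch only fires otherwise). A hedged round-trip check with toy shapes, mirroring the same sequence of operations:

```python
import torch

batch_size, channels, height, width = 2, 8, 16, 16
patch_h = patch_w = 2
patch_area = patch_h * patch_w
num_ph, num_pw = height // patch_h, width // patch_w
num_patches = num_ph * num_pw

features = torch.randn(batch_size, channels, height, width)

# unfolding: (B, C, H, W) -> (B * patch_area, num_patches, C)
patches = features.reshape(batch_size * channels * num_ph, patch_h, num_pw, patch_w)
patches = patches.transpose(1, 2)
patches = patches.reshape(batch_size, channels, num_patches, patch_area)
patches = patches.transpose(1, 3)
patches = patches.reshape(batch_size * patch_area, num_patches, -1)

# folding: invert every step in reverse order
restored = patches.contiguous().view(batch_size, patch_area, num_patches, -1)
restored = restored.transpose(1, 3)
restored = restored.reshape(batch_size * channels * num_ph, num_pw, patch_h, patch_w)
restored = restored.transpose(1, 2)
restored = restored.reshape(batch_size, channels, height, width)

assert torch.equal(features, restored)  # the round trip is exact
```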
|
class MobileViTLayer(GradientCheckpointingLayer):
'''
MobileViT block: https://huggingface.co/papers/2110.02178
'''
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, hidden_size: int, num_stages: int, dilation: int=1) -> None:
pass
def unfolding(self, features: torch.Tensor) -> tuple[torch.Tensor, dict]:
pass
def folding(self, patches: torch.Tensor, info_dict: dict) -> torch.Tensor:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 5 | 1 | 40 | 6 | 31 | 3 | 3 | 0.11 | 1 | 7 | 4 | 0 | 4 | 9 | 4 | 14 | 167 | 27 | 126 | 44 | 112 | 14 | 64 | 35 | 59 | 4 | 1 | 1 | 12
|
3,924
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTMobileNetLayer
|
import torch
from .configuration_mobilevit import MobileViTConfig
from torch import nn
class MobileViTMobileNetLayer(nn.Module):
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int=1, num_stages: int=1) -> None:
super().__init__()
self.layer = nn.ModuleList()
for i in range(num_stages):
layer = MobileViTInvertedResidual(config, in_channels=in_channels, out_channels=out_channels, stride=stride if i == 0 else 1)
self.layer.append(layer)
in_channels = out_channels
def forward(self, features: torch.Tensor) -> torch.Tensor:
for layer_module in self.layer:
features = layer_module(features)
return features
|
class MobileViTMobileNetLayer(nn.Module):
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int=1, num_stages: int=1) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 10 | 1 | 9 | 0 | 3 | 0 | 1 | 6 | 2 | 0 | 2 | 1 | 2 | 12 | 21 | 2 | 19 | 9 | 14 | 0 | 12 | 7 | 9 | 3 | 1 | 1 | 5
|
3,925
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTModel
|
from .configuration_mobilevit import MobileViTConfig
from typing import Optional, Union
from ...utils import auto_docstring, logging, torch_int
import torch
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput
@auto_docstring
class MobileViTModel(MobileViTPreTrainedModel):
def __init__(self, config: MobileViTConfig, expand_output: bool=True):
"""
expand_output (`bool`, *optional*, defaults to `True`):
Whether to expand the output of the model using a 1x1 convolution. If `True`, the model will apply an additional
1x1 convolution to expand the output channels from `config.neck_hidden_sizes[5]` to `config.neck_hidden_sizes[6]`.
"""
super().__init__(config)
self.config = config
self.expand_output = expand_output
self.conv_stem = MobileViTConvLayer(config, in_channels=config.num_channels, out_channels=config.neck_hidden_sizes[0], kernel_size=3, stride=2)
self.encoder = MobileViTEncoder(config)
if self.expand_output:
self.conv_1x1_exp = MobileViTConvLayer(config, in_channels=config.neck_hidden_sizes[5], out_channels=config.neck_hidden_sizes[6], kernel_size=1)
self.post_init()
def _prune_heads(self, heads_to_prune):
"""Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base class PreTrainedModel.
"""
for layer_index, heads in heads_to_prune.items():
mobilevit_layer = self.encoder.layer[layer_index]
if isinstance(mobilevit_layer, MobileViTLayer):
for transformer_layer in mobilevit_layer.transformer.layer:
transformer_layer.attention.prune_heads(heads)
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
embedding_output = self.conv_stem(pixel_values)
encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
if self.expand_output:
last_hidden_state = self.conv_1x1_exp(encoder_outputs[0])
pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False)
else:
last_hidden_state = encoder_outputs[0]
pooled_output = None
if not return_dict:
output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,)
return output + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
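A hedged usage sketch for the model above, built from a default config with random weights (it only demonstrates the input/output contract, not a pretrained checkpoint):

```python
import torch
from transformers import MobileViTConfig, MobileViTModel

config = MobileViTConfig()
model = MobileViTModel(config)  # expand_output=True by default
model.eval()

pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
with torch.no_grad():
    outputs = model(pixel_values)

# channels == config.neck_hidden_sizes[-1], spatial size == image_size / 32
print(outputs.last_hidden_state.shape)
print(outputs.pooler_output.shape)  # mean over the two spatial dimensions

# With expand_output=False (as the segmentation model uses it), the final 1x1
# expansion is skipped and pooler_output is None.
backbone = MobileViTModel(config, expand_output=False)
```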
|
@auto_docstring
class MobileViTModel(MobileViTPreTrainedModel):
def __init__(self, config: MobileViTConfig, expand_output: bool=True):
'''
expand_output (`bool`, *optional*, defaults to `True`):
Whether to expand the output of the model using a 1x1 convolution. If `True`, the model will apply an additional
1x1 convolution to expand the output channels from `config.neck_hidden_sizes[5]` to `config.neck_hidden_sizes[6]`.
'''
pass
def _prune_heads(self, heads_to_prune):
'''Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base class PreTrainedModel.
'''
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
pass
| 6 | 2 | 25 | 4 | 19 | 2 | 4 | 0.07 | 1 | 10 | 5 | 0 | 3 | 5 | 3 | 4 | 85 | 13 | 67 | 23 | 50 | 5 | 32 | 17 | 28 | 7 | 2 | 3 | 13
|
3,926
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTOutput
|
import torch
from .configuration_mobilevit import MobileViTConfig
from torch import nn
class MobileViTOutput(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
super().__init__()
self.dense = nn.Linear(intermediate_size, hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
|
class MobileViTOutput(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 4 | 1 | 0 | 2 | 2 | 2 | 12 | 11 | 1 | 10 | 5 | 7 | 0 | 10 | 5 | 7 | 1 | 1 | 0 | 2
|
3,927
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTPreTrainedModel
|
from ...modeling_utils import PreTrainedModel
from torch import nn
from .configuration_mobilevit import MobileViTConfig
from ...utils import auto_docstring, logging, torch_int
@auto_docstring
class MobileViTPreTrainedModel(PreTrainedModel):
config: MobileViTConfig
base_model_prefix = 'mobilevit'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = ['MobileViTLayer']
def _init_weights(self, module: nn.Module) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class MobileViTPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module) -> None:
'''Initialize the weights'''
pass
| 3 | 1 | 11 | 0 | 8 | 3 | 4 | 0.5 | 1 | 0 | 0 | 3 | 1 | 0 | 1 | 1 | 23 | 2 | 14 | 7 | 12 | 7 | 13 | 7 | 11 | 4 | 1 | 2 | 4
|
3,928
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTSelfAttention
|
from .configuration_mobilevit import MobileViTConfig
import torch
from torch import nn
import math
class MobileViTSelfAttention(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
super().__init__()
if hidden_size % config.num_attention_heads != 0:
raise ValueError(f'The hidden size {hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
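The forward pass above is plain scaled dot-product attention over the patch sequence. A shape walk-through with toy sizes, in plain torch (no transformers dependency):

```python
import math
import torch

batch, seq, heads, head_dim = 2, 64, 4, 32
hidden = heads * head_dim

q = torch.randn(batch, heads, seq, head_dim)
k = torch.randn(batch, heads, seq, head_dim)
v = torch.randn(batch, heads, seq, head_dim)

scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(head_dim)  # (B, h, S, S)
probs = torch.nn.functional.softmax(scores, dim=-1)                  # each row sums to 1
context = torch.matmul(probs, v)                                     # (B, h, S, d)
context = context.permute(0, 2, 1, 3).reshape(batch, seq, hidden)    # back to (B, S, hidden)
print(context.shape)  # torch.Size([2, 64, 128])
```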
|
class MobileViTSelfAttention(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 15 | 3 | 11 | 1 | 1 | 0.12 | 1 | 5 | 1 | 0 | 3 | 7 | 3 | 13 | 49 | 12 | 33 | 20 | 29 | 4 | 30 | 20 | 26 | 2 | 1 | 1 | 4
|
3,929
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTSelfOutput
|
from .configuration_mobilevit import MobileViTConfig
from torch import nn
import torch
class MobileViTSelfOutput(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class MobileViTSelfOutput(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 4 | 1 | 0 | 2 | 2 | 2 | 12 | 10 | 1 | 9 | 5 | 6 | 0 | 9 | 5 | 6 | 1 | 1 | 0 | 2
|
3,930
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTTransformer
|
from torch import nn
import torch
from .configuration_mobilevit import MobileViTConfig
class MobileViTTransformer(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int) -> None:
super().__init__()
self.layer = nn.ModuleList()
for _ in range(num_stages):
transformer_layer = MobileViTTransformerLayer(config, hidden_size=hidden_size, intermediate_size=int(hidden_size * config.mlp_ratio))
self.layer.append(transformer_layer)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
for layer_module in self.layer:
hidden_states = layer_module(hidden_states)
return hidden_states
|
class MobileViTTransformer(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 8 | 1 | 7 | 0 | 2 | 0 | 1 | 6 | 2 | 0 | 2 | 1 | 2 | 12 | 17 | 2 | 15 | 7 | 12 | 0 | 11 | 7 | 8 | 2 | 1 | 1 | 4
|
3,931
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevit/modeling_mobilevit.py
|
transformers.models.mobilevit.modeling_mobilevit.MobileViTTransformerLayer
|
import torch
from torch import nn
from .configuration_mobilevit import MobileViTConfig
class MobileViTTransformerLayer(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
super().__init__()
self.attention = MobileViTAttention(config, hidden_size)
self.intermediate = MobileViTIntermediate(config, hidden_size, intermediate_size)
self.output = MobileViTOutput(config, hidden_size, intermediate_size)
self.layernorm_before = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
attention_output = self.attention(self.layernorm_before(hidden_states))
hidden_states = attention_output + hidden_states
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output, hidden_states)
return layer_output
|
class MobileViTTransformerLayer(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 8 | 1 | 7 | 0 | 1 | 0 | 1 | 7 | 4 | 0 | 2 | 5 | 2 | 12 | 17 | 2 | 15 | 10 | 12 | 0 | 15 | 10 | 12 | 1 | 1 | 0 | 2
|
3,932
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py
|
transformers.models.mobilevitv2.configuration_mobilevitv2.MobileViTV2Config
|
from ...configuration_utils import PretrainedConfig
class MobileViTV2Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MobileViTV2Model`]. It is used to instantiate a
MobileViTV2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MobileViTV2
[apple/mobilevitv2-1.0](https://huggingface.co/apple/mobilevitv2-1.0) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 256):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 2):
The size (resolution) of each patch.
expand_ratio (`float`, *optional*, defaults to 2.0):
Expansion factor for the MobileNetv2 layers.
hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
conv_kernel_size (`int`, *optional*, defaults to 3):
The size of the convolutional kernel in the MobileViTV2 layer.
output_stride (`int`, *optional*, defaults to 32):
The ratio of the spatial resolution of the output to the resolution of the input image.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for attached classifiers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
aspp_out_channels (`int`, *optional*, defaults to 512):
Number of output channels used in the ASPP layer for semantic segmentation.
atrous_rates (`list[int]`, *optional*, defaults to `[6, 12, 18]`):
Dilation (atrous) factors used in the ASPP layer for semantic segmentation.
aspp_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the ASPP layer for semantic segmentation.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
n_attn_blocks (`list[int]`, *optional*, defaults to `[2, 4, 3]`):
The number of attention blocks in each MobileViTV2Layer.
base_attn_unit_dims (`list[int]`, *optional*, defaults to `[128, 192, 256]`):
The base multiplier for dimensions of attention blocks in each MobileViTV2Layer.
width_multiplier (`float`, *optional*, defaults to 1.0):
The width multiplier for MobileViTV2.
ffn_multiplier (`int`, *optional*, defaults to 2):
The FFN multiplier for MobileViTV2.
attn_dropout (`float`, *optional*, defaults to 0.0):
The dropout in the attention layer.
ffn_dropout (`float`, *optional*, defaults to 0.0):
The dropout between FFN layers.
Example:
```python
>>> from transformers import MobileViTV2Config, MobileViTV2Model
>>> # Initializing a mobilevitv2-small style configuration
>>> configuration = MobileViTV2Config()
>>> # Initializing a model from the mobilevitv2-small style configuration
>>> model = MobileViTV2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'mobilevitv2'
def __init__(self, num_channels=3, image_size=256, patch_size=2, expand_ratio=2.0, hidden_act='swish', conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-05, aspp_out_channels=512, atrous_rates=[6, 12, 18], aspp_dropout_prob=0.1, semantic_loss_ignore_index=255, n_attn_blocks=[2, 4, 3], base_attn_unit_dims=[128, 192, 256], width_multiplier=1.0, ffn_multiplier=2, attn_dropout=0.0, ffn_dropout=0.0, **kwargs):
super().__init__(**kwargs)
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.expand_ratio = expand_ratio
self.hidden_act = hidden_act
self.conv_kernel_size = conv_kernel_size
self.output_stride = output_stride
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.n_attn_blocks = n_attn_blocks
self.base_attn_unit_dims = base_attn_unit_dims
self.width_multiplier = width_multiplier
self.ffn_multiplier = ffn_multiplier
self.ffn_dropout = ffn_dropout
self.attn_dropout = attn_dropout
self.classifier_dropout_prob = classifier_dropout_prob
self.aspp_out_channels = aspp_out_channels
self.atrous_rates = atrous_rates
self.aspp_dropout_prob = aspp_dropout_prob
self.semantic_loss_ignore_index = semantic_loss_ignore_index
|
class MobileViTV2Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MobileViTV2Model`]. It is used to instantiate a
MobileViTV2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MobileViTV2
[apple/mobilevitv2-1.0](https://huggingface.co/apple/mobilevitv2-1.0) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 256):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 2):
The size (resolution) of each patch.
expand_ratio (`float`, *optional*, defaults to 2.0):
Expansion factor for the MobileNetv2 layers.
hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
conv_kernel_size (`int`, *optional*, defaults to 3):
The size of the convolutional kernel in the MobileViTV2 layer.
output_stride (`int`, *optional*, defaults to 32):
The ratio of the spatial resolution of the output to the resolution of the input image.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for attached classifiers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
aspp_out_channels (`int`, *optional*, defaults to 512):
Number of output channels used in the ASPP layer for semantic segmentation.
atrous_rates (`list[int]`, *optional*, defaults to `[6, 12, 18]`):
Dilation (atrous) factors used in the ASPP layer for semantic segmentation.
aspp_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the ASPP layer for semantic segmentation.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
n_attn_blocks (`list[int]`, *optional*, defaults to `[2, 4, 3]`):
The number of attention blocks in each MobileViTV2Layer.
base_attn_unit_dims (`list[int]`, *optional*, defaults to `[128, 192, 256]`):
The base multiplier for dimensions of attention blocks in each MobileViTV2Layer.
width_multiplier (`float`, *optional*, defaults to 1.0):
The width multiplier for MobileViTV2.
ffn_multiplier (`int`, *optional*, defaults to 2):
The FFN multiplier for MobileViTV2.
attn_dropout (`float`, *optional*, defaults to 0.0):
The dropout in the attention layer.
ffn_dropout (`float`, *optional*, defaults to 0.0):
The dropout between FFN layers.
Example:
```python
>>> from transformers import MobileViTV2Config, MobileViTV2Model
>>> # Initializing a mobilevitv2-small style configuration
>>> configuration = MobileViTV2Config()
>>> # Initializing a model from the mobilevitv2-small style configuration
>>> model = MobileViTV2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, num_channels=3, image_size=256, patch_size=2, expand_ratio=2.0, hidden_act='swish', conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-05, aspp_out_channels=512, atrous_rates=[6, 12, 18], aspp_dropout_prob=0.1, semantic_loss_ignore_index=255, n_attn_blocks=[2, 4, 3], base_attn_unit_dims=[128, 192, 256], width_multiplier=1.0, ffn_multiplier=2, attn_dropout=0.0, ffn_dropout=0.0, **kwargs):
pass
| 2 | 1 | 48 | 2 | 45 | 1 | 1 | 1.26 | 1 | 1 | 0 | 0 | 1 | 20 | 1 | 1 | 117 | 11 | 47 | 46 | 22 | 59 | 24 | 23 | 22 | 1 | 1 | 0 | 1
|
3,933
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/configuration_mobilevitv2.py
|
transformers.models.mobilevitv2.configuration_mobilevitv2.MobileViTV2OnnxConfig
|
from collections.abc import Mapping
from packaging import version
from ...onnx import OnnxConfig
from collections import OrderedDict
class MobileViTV2OnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.11')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'image-classification':
return OrderedDict([('logits', {0: 'batch'})])
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])
@property
def atol_for_validation(self) -> float:
return 0.0001
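A hedged usage sketch for the ONNX config above, assuming both classes are importable from the module at the path shown for this record; the `task` string selects which output mapping `outputs` returns:

```python
from transformers.models.mobilevitv2.configuration_mobilevitv2 import (
    MobileViTV2Config,
    MobileViTV2OnnxConfig,
)

config = MobileViTV2Config()
onnx_config = MobileViTV2OnnxConfig(config, task="image-classification")
print(onnx_config.inputs)               # pixel_values with dynamic batch/channel/size axes
print(onnx_config.outputs)              # logits with a dynamic batch axis
print(onnx_config.atol_for_validation)  # 0.0001
```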
|
class MobileViTV2OnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
| 7 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 4 | 0 | 0 | 3 | 0 | 3 | 3 | 17 | 3 | 14 | 8 | 7 | 0 | 10 | 5 | 6 | 2 | 1 | 1 | 4
|
3,934
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
|
transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2ASPP
|
import torch
from torch import nn
from .configuration_mobilevitv2 import MobileViTV2Config
class MobileViTV2ASPP(nn.Module):
"""
ASPP module defined in DeepLab papers: https://huggingface.co/papers/1606.00915, https://huggingface.co/papers/1706.05587
"""
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__()
encoder_out_channels = make_divisible(512 * config.width_multiplier, divisor=8)
in_channels = encoder_out_channels
out_channels = config.aspp_out_channels
if len(config.atrous_rates) != 3:
raise ValueError('Expected 3 values for atrous_rates')
self.convs = nn.ModuleList()
in_projection = MobileViTV2ConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, use_activation='relu')
self.convs.append(in_projection)
self.convs.extend([MobileViTV2ConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=3, dilation=rate, use_activation='relu') for rate in config.atrous_rates])
pool_layer = MobileViTV2ASPPPooling(config, in_channels, out_channels)
self.convs.append(pool_layer)
self.project = MobileViTV2ConvLayer(config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation='relu')
self.dropout = nn.Dropout(p=config.aspp_dropout_prob)
def forward(self, features: torch.Tensor) -> torch.Tensor:
pyramid = []
for conv in self.convs:
pyramid.append(conv(features))
pyramid = torch.cat(pyramid, dim=1)
pooled_features = self.project(pyramid)
pooled_features = self.dropout(pooled_features)
return pooled_features
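The ASPP above fans the input through five parallel branches (one 1x1 projection, three dilated 3x3 convolutions, one pooled branch) and concatenates them, which is why the projection sees `5 * out_channels` inputs. A standalone sketch of that fan-out with bare convolutions (normalization and activations omitted for brevity; the sizes are illustrative):

```python
import torch
from torch import nn

in_ch, out_ch, rates = 64, 32, (6, 12, 18)
branches = nn.ModuleList(
    [nn.Conv2d(in_ch, out_ch, kernel_size=1)]
    + [nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=r, dilation=r) for r in rates]
)
pool = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_ch, out_ch, kernel_size=1))
project = nn.Conv2d(5 * out_ch, out_ch, kernel_size=1)

x = torch.randn(1, in_ch, 32, 32)
feats = [branch(x) for branch in branches]
# The pooled branch collapses to 1x1, so it is upsampled back before the concat.
pooled = nn.functional.interpolate(pool(x), size=x.shape[-2:], mode="bilinear", align_corners=False)
y = project(torch.cat(feats + [pooled], dim=1))
print(y.shape)  # torch.Size([1, 32, 32, 32])
```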
|
class MobileViTV2ASPP(nn.Module):
'''
ASPP module defined in DeepLab papers: https://huggingface.co/papers/1606.00915, https://huggingface.co/papers/1706.05587
'''
def __init__(self, config: MobileViTV2Config) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3 | 1 | 26 | 5 | 22 | 1 | 2 | 0.09 | 1 | 6 | 3 | 0 | 2 | 3 | 2 | 12 | 58 | 11 | 44 | 14 | 41 | 4 | 24 | 14 | 21 | 2 | 1 | 1 | 4
|
3,935
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
|
transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2ASPPPooling
|
from .configuration_mobilevitv2 import MobileViTV2Config
from torch import nn
import torch
class MobileViTV2ASPPPooling(nn.Module):
def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int) -> None:
super().__init__()
self.global_pool = nn.AdaptiveAvgPool2d(output_size=1)
self.conv_1x1 = MobileViTV2ConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, use_normalization=True, use_activation='relu')
def forward(self, features: torch.Tensor) -> torch.Tensor:
spatial_size = features.shape[-2:]
features = self.global_pool(features)
features = self.conv_1x1(features)
features = nn.functional.interpolate(features, size=spatial_size, mode='bilinear', align_corners=False)
return features
|
class MobileViTV2ASPPPooling(nn.Module):
def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 10 | 1 | 9 | 0 | 1 | 0 | 1 | 5 | 2 | 0 | 2 | 2 | 2 | 12 | 22 | 3 | 19 | 6 | 16 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2
|
3,936
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
|
transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2ConvLayer
|
from torch import nn
from typing import Optional, Union
import torch
from ...activations import ACT2FN
from .configuration_mobilevitv2 import MobileViTV2Config
class MobileViTV2ConvLayer(nn.Module):
def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int, kernel_size: int, stride: int=1, groups: int=1, bias: bool=False, dilation: int=1, use_normalization: bool=True, use_activation: Union[bool, str]=True) -> None:
super().__init__()
padding = int((kernel_size - 1) / 2) * dilation
if in_channels % groups != 0:
raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.')
if out_channels % groups != 0:
raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.')
self.convolution = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode='zeros')
if use_normalization:
self.normalization = nn.BatchNorm2d(num_features=out_channels, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
else:
self.normalization = None
if use_activation:
if isinstance(use_activation, str):
self.activation = ACT2FN[use_activation]
elif isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
else:
self.activation = None
def forward(self, features: torch.Tensor) -> torch.Tensor:
features = self.convolution(features)
if self.normalization is not None:
features = self.normalization(features)
if self.activation is not None:
features = self.activation(features)
return features
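The padding formula above, `int((kernel_size - 1) / 2) * dilation`, is "same" padding for odd kernels: at stride 1 the spatial size is preserved for any dilation. A quick numeric check:

```python
import torch
from torch import nn

x = torch.randn(1, 8, 32, 32)
for kernel_size, dilation in [(3, 1), (3, 2), (5, 1)]:
    padding = (kernel_size - 1) // 2 * dilation
    conv = nn.Conv2d(8, 8, kernel_size, padding=padding, dilation=dilation)
    assert conv(x).shape[-2:] == x.shape[-2:]  # H and W unchanged at stride 1
```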
|
class MobileViTV2ConvLayer(nn.Module):
def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int, kernel_size: int, stride: int=1, groups: int=1, bias: bool=False, dilation: int=1, use_normalization: bool=True, use_activation: Union[bool, str]=True) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 30 | 2 | 28 | 0 | 5 | 0 | 1 | 7 | 1 | 0 | 2 | 3 | 2 | 12 | 62 | 5 | 57 | 19 | 42 | 0 | 25 | 7 | 22 | 7 | 1 | 2 | 10
|
3,937
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
|
transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2DeepLabV3
|
from torch import nn
from .configuration_mobilevitv2 import MobileViTV2Config
import torch
class MobileViTV2DeepLabV3(nn.Module):
"""
DeepLabv3 architecture: https://huggingface.co/papers/1706.05587
"""
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__()
self.aspp = MobileViTV2ASPP(config)
self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
self.classifier = MobileViTV2ConvLayer(config, in_channels=config.aspp_out_channels, out_channels=config.num_labels, kernel_size=1, use_normalization=False, use_activation=False, bias=True)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
features = self.aspp(hidden_states[-1])
features = self.dropout(features)
features = self.classifier(features)
return features
|
class MobileViTV2DeepLabV3(nn.Module):
'''
DeepLabv3 architecture: https://huggingface.co/papers/1706.05587
'''
def __init__(self, config: MobileViTV2Config) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 1 | 10 | 1 | 9 | 0 | 1 | 0.16 | 1 | 5 | 3 | 0 | 2 | 3 | 2 | 12 | 26 | 4 | 19 | 7 | 16 | 3 | 11 | 7 | 8 | 1 | 1 | 0 | 2
|
3,938
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
|
transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2Encoder
|
from .configuration_mobilevitv2 import MobileViTV2Config
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput
from torch import nn
from typing import Optional, Union
import torch
class MobileViTV2Encoder(nn.Module):
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__()
self.config = config
self.layer = nn.ModuleList()
self.gradient_checkpointing = False
dilate_layer_4 = dilate_layer_5 = False
if config.output_stride == 8:
dilate_layer_4 = True
dilate_layer_5 = True
elif config.output_stride == 16:
dilate_layer_5 = True
dilation = 1
layer_0_dim = make_divisible(clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16)
layer_1_dim = make_divisible(64 * config.width_multiplier, divisor=16)
layer_2_dim = make_divisible(128 * config.width_multiplier, divisor=8)
layer_3_dim = make_divisible(256 * config.width_multiplier, divisor=8)
layer_4_dim = make_divisible(384 * config.width_multiplier, divisor=8)
layer_5_dim = make_divisible(512 * config.width_multiplier, divisor=8)
layer_1 = MobileViTV2MobileNetLayer(config, in_channels=layer_0_dim, out_channels=layer_1_dim, stride=1, num_stages=1)
self.layer.append(layer_1)
layer_2 = MobileViTV2MobileNetLayer(config, in_channels=layer_1_dim, out_channels=layer_2_dim, stride=2, num_stages=2)
self.layer.append(layer_2)
layer_3 = MobileViTV2Layer(config, in_channels=layer_2_dim, out_channels=layer_3_dim, attn_unit_dim=make_divisible(config.base_attn_unit_dims[0] * config.width_multiplier, divisor=8), n_attn_blocks=config.n_attn_blocks[0])
self.layer.append(layer_3)
if dilate_layer_4:
dilation *= 2
layer_4 = MobileViTV2Layer(config, in_channels=layer_3_dim, out_channels=layer_4_dim, attn_unit_dim=make_divisible(config.base_attn_unit_dims[1] * config.width_multiplier, divisor=8), n_attn_blocks=config.n_attn_blocks[1], dilation=dilation)
self.layer.append(layer_4)
if dilate_layer_5:
dilation *= 2
layer_5 = MobileViTV2Layer(config, in_channels=layer_4_dim, out_channels=layer_5_dim, attn_unit_dim=make_divisible(config.base_attn_unit_dims[2] * config.width_multiplier, divisor=8), n_attn_blocks=config.n_attn_blocks[2], dilation=dilation)
self.layer.append(layer_5)
def forward(self, hidden_states: torch.Tensor, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutputWithNoAttention]:
all_hidden_states = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states] if v is not None))
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
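The output-stride bookkeeping above trades downsampling for dilation: at `output_stride=16` only the last stage is dilated, at `output_stride=8` the last two are, and the running `dilation` doubles each time a stage is converted. The flag logic, isolated as a pure-Python sketch:

```python
def dilation_plan(output_stride: int) -> dict:
    dilate_layer_4 = dilate_layer_5 = False
    if output_stride == 8:
        dilate_layer_4 = dilate_layer_5 = True
    elif output_stride == 16:
        dilate_layer_5 = True
    dilation = 1
    plan = {}
    for name, dilate in [("layer_4", dilate_layer_4), ("layer_5", dilate_layer_5)]:
        if dilate:
            dilation *= 2  # this stage keeps resolution and dilates instead
        plan[name] = dilation
    return plan

print(dilation_plan(32))  # {'layer_4': 1, 'layer_5': 1}
print(dilation_plan(16))  # {'layer_4': 1, 'layer_5': 2}
print(dilation_plan(8))   # {'layer_4': 2, 'layer_5': 4}
```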
|
class MobileViTV2Encoder(nn.Module):
def __init__(self, config: MobileViTV2Config) -> None:
pass
def forward(self, hidden_states: torch.Tensor, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutputWithNoAttention]:
pass
| 3 | 0 | 52 | 8 | 43 | 1 | 6 | 0.02 | 1 | 9 | 4 | 0 | 2 | 3 | 2 | 12 | 106 | 17 | 87 | 26 | 79 | 2 | 43 | 21 | 40 | 6 | 1 | 2 | 11
|
3,939
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
|
transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2FFN
|
import torch
from torch import nn
from .configuration_mobilevitv2 import MobileViTV2Config
class MobileViTV2FFN(nn.Module):
def __init__(self, config: MobileViTV2Config, embed_dim: int, ffn_latent_dim: int, ffn_dropout: float=0.0) -> None:
super().__init__()
self.conv1 = MobileViTV2ConvLayer(config=config, in_channels=embed_dim, out_channels=ffn_latent_dim, kernel_size=1, stride=1, bias=True, use_normalization=False, use_activation=True)
self.dropout1 = nn.Dropout(ffn_dropout)
self.conv2 = MobileViTV2ConvLayer(config=config, in_channels=ffn_latent_dim, out_channels=embed_dim, kernel_size=1, stride=1, bias=True, use_normalization=False, use_activation=False)
self.dropout2 = nn.Dropout(ffn_dropout)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.conv1(hidden_states)
hidden_states = self.dropout1(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.dropout2(hidden_states)
return hidden_states
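The FFN above uses 1x1 convolutions rather than `nn.Linear` because the tokens live in a `(batch, channels, patch_area, num_patches)` layout; a 1x1 convolution over that layout is exactly a per-position linear map along the channel axis. A small equivalence check (toy sizes):

```python
import torch
from torch import nn

embed_dim, latent_dim = 16, 32
conv = nn.Conv2d(embed_dim, latent_dim, kernel_size=1, bias=True)
linear = nn.Linear(embed_dim, latent_dim, bias=True)
linear.weight.data.copy_(conv.weight.data.view(latent_dim, embed_dim))  # share weights
linear.bias.data.copy_(conv.bias.data)

x = torch.randn(2, embed_dim, 4, 49)  # (batch, channels, patch_area, num_patches)
out_conv = conv(x)
out_linear = linear(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)  # channels last, apply, move back
assert torch.allclose(out_conv, out_linear, atol=1e-5)
```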
|
class MobileViTV2FFN(nn.Module):
def __init__(self, config: MobileViTV2Config, embed_dim: int, ffn_latent_dim: int, ffn_dropout: float=0.0) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 19 | 1 | 18 | 0 | 1 | 0 | 1 | 6 | 2 | 0 | 2 | 4 | 2 | 12 | 39 | 2 | 37 | 13 | 28 | 0 | 13 | 7 | 10 | 1 | 1 | 0 | 2
|
3,940
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
|
transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2ForImageClassification
|
from ...utils import auto_docstring, logging
import torch
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput
from torch import nn
from .configuration_mobilevitv2 import MobileViTV2Config
@auto_docstring(custom_intro='\n MobileViTV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class MobileViTV2ForImageClassification(MobileViTV2PreTrainedModel):
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilevitv2 = MobileViTV2Model(config)
out_channels = make_divisible(512 * config.width_multiplier, divisor=8)
self.classifier = nn.Linear(in_features=out_channels, out_features=config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilevitv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
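A hedged usage sketch for the classifier above, built from a config with random weights (it only demonstrates the input/output contract, not a pretrained checkpoint):

```python
import torch
from transformers import MobileViTV2Config, MobileViTV2ForImageClassification

config = MobileViTV2Config(num_labels=10)
model = MobileViTV2ForImageClassification(config)
model.eval()

pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
with torch.no_grad():
    outputs = model(pixel_values)
print(outputs.logits.shape)  # torch.Size([1, 10])
```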
|
@auto_docstring(custom_intro='\n MobileViTV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class MobileViTV2ForImageClassification(MobileViTV2PreTrainedModel):
def __init__(self, config: MobileViTV2Config) -> None:
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5 | 1 | 35 | 5 | 26 | 5 | 8 | 0.15 | 1 | 8 | 3 | 0 | 2 | 3 | 2 | 3 | 78 | 11 | 59 | 20 | 43 | 9 | 33 | 13 | 30 | 13 | 2 | 3 | 15
|
3,941
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
|
transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2ForSemanticSegmentation
|
from ...utils import auto_docstring, logging
from torch.nn import CrossEntropyLoss
from typing import Optional, Union
from torch import nn
import torch
from .configuration_mobilevitv2 import MobileViTV2Config
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput
@auto_docstring(custom_intro='\n MobileViTV2 model with a semantic segmentation head on top, e.g. for Pascal VOC.\n ')
class MobileViTV2ForSemanticSegmentation(MobileViTV2PreTrainedModel):
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilevitv2 = MobileViTV2Model(config, expand_output=False)
self.segmentation_head = MobileViTV2DeepLabV3(config)
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, MobileViTV2ForSemanticSegmentation
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
>>> model = MobileViTV2ForSemanticSegmentation.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # logits are of shape (batch_size, num_labels, height, width)
>>> logits = outputs.logits
```"""
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None and self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one')
outputs = self.mobilevitv2(pixel_values, output_hidden_states=True, return_dict=return_dict)
encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1]
logits = self.segmentation_head(encoder_hidden_states)
loss = None
if labels is not None:
upsampled_logits = nn.functional.interpolate(logits, size=labels.shape[-2:], mode='bilinear', align_corners=False)
loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
loss = loss_fct(upsampled_logits, labels)
if not return_dict:
if output_hidden_states:
output = (logits,) + outputs[1:]
else:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SemanticSegmenterOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None)
|
@auto_docstring(custom_intro='\n MobileViTV2 model with a semantic segmentation head on top, e.g. for Pascal VOC.\n ')
class MobileViTV2ForSemanticSegmentation(MobileViTV2PreTrainedModel):
def __init__(self, config: MobileViTV2Config) -> None:
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SemanticSegmenterOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, MobileViTV2ForSemanticSegmentation
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
>>> model = MobileViTV2ForSemanticSegmentation.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # logits are of shape (batch_size, num_labels, height, width)
>>> logits = outputs.logits
```'''
pass
| 5 | 1 | 43 | 9 | 23 | 12 | 6 | 0.5 | 1 | 9 | 4 | 0 | 2 | 3 | 2 | 3 | 89 | 18 | 48 | 20 | 37 | 24 | 26 | 13 | 23 | 10 | 2 | 2 | 11
|
3,942
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
|
transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2InvertedResidual
|
from .configuration_mobilevitv2 import MobileViTV2Config
from torch import nn
import torch
class MobileViTV2InvertedResidual(nn.Module):
"""
Inverted residual block (MobileNetv2): https://huggingface.co/papers/1801.04381
"""
def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int, dilation: int=1) -> None:
super().__init__()
expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)
if stride not in [1, 2]:
raise ValueError(f'Invalid stride {stride}.')
self.use_residual = stride == 1 and in_channels == out_channels
self.expand_1x1 = MobileViTV2ConvLayer(config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1)
self.conv_3x3 = MobileViTV2ConvLayer(config, in_channels=expanded_channels, out_channels=expanded_channels, kernel_size=3, stride=stride, groups=expanded_channels, dilation=dilation)
self.reduce_1x1 = MobileViTV2ConvLayer(config, in_channels=expanded_channels, out_channels=out_channels, kernel_size=1, use_activation=False)
def forward(self, features: torch.Tensor) -> torch.Tensor:
residual = features
features = self.expand_1x1(features)
features = self.conv_3x3(features)
features = self.reduce_1x1(features)
return residual + features if self.use_residual else features
|
class MobileViTV2InvertedResidual(nn.Module):
'''
Inverted residual block (MobileNetv2): https://huggingface.co/papers/1801.04381
'''
def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int, dilation: int=1) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3 | 1 | 20 | 4 | 17 | 0 | 2 | 0.09 | 1 | 6 | 2 | 0 | 2 | 4 | 2 | 12 | 46 | 9 | 34 | 11 | 29 | 3 | 16 | 9 | 13 | 2 | 1 | 1 | 4
|
3,943
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
|
transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2Layer
|
from ...modeling_layers import GradientCheckpointingLayer
import torch
from .configuration_mobilevitv2 import MobileViTV2Config
from torch import nn
class MobileViTV2Layer(GradientCheckpointingLayer):
"""
MobileViTV2 layer: https://huggingface.co/papers/2206.02680
"""
def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int, attn_unit_dim: int, n_attn_blocks: int=2, dilation: int=1, stride: int=2) -> None:
super().__init__()
self.patch_width = config.patch_size
self.patch_height = config.patch_size
cnn_out_dim = attn_unit_dim
if stride == 2:
self.downsampling_layer = MobileViTV2InvertedResidual(config, in_channels=in_channels, out_channels=out_channels, stride=stride if dilation == 1 else 1, dilation=dilation // 2 if dilation > 1 else 1)
in_channels = out_channels
else:
self.downsampling_layer = None
self.conv_kxk = MobileViTV2ConvLayer(config, in_channels=in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size, groups=in_channels)
self.conv_1x1 = MobileViTV2ConvLayer(config, in_channels=in_channels, out_channels=cnn_out_dim, kernel_size=1, use_normalization=False, use_activation=False)
self.transformer = MobileViTV2Transformer(config, d_model=attn_unit_dim, n_layers=n_attn_blocks)
self.layernorm = nn.GroupNorm(num_groups=1, num_channels=attn_unit_dim, eps=config.layer_norm_eps)
self.conv_projection = MobileViTV2ConvLayer(config, in_channels=cnn_out_dim, out_channels=in_channels, kernel_size=1, use_normalization=True, use_activation=False)
def unfolding(self, feature_map: torch.Tensor) -> tuple[torch.Tensor, tuple[int, int]]:
batch_size, in_channels, img_height, img_width = feature_map.shape
patches = nn.functional.unfold(feature_map, kernel_size=(self.patch_height, self.patch_width), stride=(self.patch_height, self.patch_width))
patches = patches.reshape(batch_size, in_channels, self.patch_height * self.patch_width, -1)
return (patches, (img_height, img_width))
def folding(self, patches: torch.Tensor, output_size: tuple[int, int]) -> torch.Tensor:
batch_size, in_dim, patch_size, n_patches = patches.shape
patches = patches.reshape(batch_size, in_dim * patch_size, n_patches)
feature_map = nn.functional.fold(patches, output_size=output_size, kernel_size=(self.patch_height, self.patch_width), stride=(self.patch_height, self.patch_width))
return feature_map
def forward(self, features: torch.Tensor) -> torch.Tensor:
if self.downsampling_layer:
features = self.downsampling_layer(features)
features = self.conv_kxk(features)
features = self.conv_1x1(features)
patches, output_size = self.unfolding(features)
patches = self.transformer(patches)
patches = self.layernorm(patches)
features = self.folding(patches, output_size)
features = self.conv_projection(features)
return features
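The transformer above runs on the unfolded patch tensor. Because `unfolding` uses `kernel_size` equal to `stride`, the patches do not overlap and `nn.functional.fold` inverts `nn.functional.unfold` exactly. A minimal roundtrip check with illustrative sizes:
```python
# Roundtrip check: non-overlapping patches, so fold exactly inverts unfold.
import torch
from torch import nn

patch, channels = 2, 8
feature_map = torch.randn(1, channels, 16, 16)

patches = nn.functional.unfold(feature_map, kernel_size=(patch, patch), stride=(patch, patch))
patches = patches.reshape(1, channels, patch * patch, -1)  # (batch, C, pixels_per_patch, n_patches)

restored = nn.functional.fold(
    patches.reshape(1, channels * patch * patch, -1),
    output_size=(16, 16),
    kernel_size=(patch, patch),
    stride=(patch, patch),
)
assert torch.equal(restored, feature_map)
```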
|
class MobileViTV2Layer(GradientCheckpointingLayer):
'''
MobileViTV2 layer: https://huggingface.co/papers/2206.02680
'''
def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int, attn_unit_dim: int, n_attn_blocks: int=2, dilation: int=1, stride: int=2) -> None:
pass
def unfolding(self, feature_map: torch.Tensor) -> tuple[torch.Tensor, tuple[int, int]]:
pass
def folding(self, patches: torch.Tensor, output_size: tuple[int, int]) -> torch.Tensor:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 5 | 1 | 26 | 4 | 20 | 3 | 2 | 0.16 | 1 | 7 | 4 | 0 | 4 | 8 | 4 | 14 | 112 | 18 | 81 | 28 | 67 | 13 | 36 | 19 | 31 | 4 | 1 | 1 | 8 |
| 3,944 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2LinearSelfAttention |
from torch import nn
import torch
from .configuration_mobilevitv2 import MobileViTV2Config
class MobileViTV2LinearSelfAttention(nn.Module):
"""
This layer applies self-attention with linear complexity, as described in the MobileViTV2 paper:
https://huggingface.co/papers/2206.02680
Args:
config (`MobileViTV2Config`):
Model configuration object
embed_dim (`int`):
`input_channels` from an expected input of size :math:`(batch_size, input_channels, height, width)`
"""
def __init__(self, config: MobileViTV2Config, embed_dim: int) -> None:
super().__init__()
self.qkv_proj = MobileViTV2ConvLayer(config=config, in_channels=embed_dim, out_channels=1 + 2 * embed_dim, bias=True, kernel_size=1, use_normalization=False, use_activation=False)
self.attn_dropout = nn.Dropout(p=config.attn_dropout)
self.out_proj = MobileViTV2ConvLayer(config=config, in_channels=embed_dim, out_channels=embed_dim, bias=True, kernel_size=1, use_normalization=False, use_activation=False)
self.embed_dim = embed_dim
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
qkv = self.qkv_proj(hidden_states)
query, key, value = torch.split(qkv, split_size_or_sections=[1, self.embed_dim, self.embed_dim], dim=1)
context_scores = torch.nn.functional.softmax(query, dim=-1)
context_scores = self.attn_dropout(context_scores)
context_vector = key * context_scores
context_vector = torch.sum(context_vector, dim=-1, keepdim=True)
out = torch.nn.functional.relu(value) * context_vector.expand_as(value)
out = self.out_proj(out)
return out
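A shape sketch of the linear attention above, with illustrative sizes: the query has a single channel, so the softmax yields one score per token and the context is a single weighted sum of keys, linear rather than quadratic in the number of patches:
```python
# Shape sketch (illustrative sizes, not the module API).
import torch

batch, embed_dim, pixels, n_patches = 2, 16, 4, 64
qkv = torch.randn(batch, 1 + 2 * embed_dim, pixels, n_patches)

query, key, value = torch.split(qkv, [1, embed_dim, embed_dim], dim=1)
context_scores = torch.nn.functional.softmax(query, dim=-1)        # (2, 1, 4, 64)
context_vector = (key * context_scores).sum(dim=-1, keepdim=True)  # (2, 16, 4, 1)
out = torch.nn.functional.relu(value) * context_vector.expand_as(value)
print(out.shape)  # torch.Size([2, 16, 4, 64])
```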
|
class MobileViTV2LinearSelfAttention(nn.Module):
'''
This layer applies self-attention with linear complexity, as described in the MobileViTV2 paper:
https://huggingface.co/papers/2206.02680
Args:
config (`MobileViTV2Config`):
Model configuration object
embed_dim (`int`):
`input_channels` from an expected input of size :math:`(batch_size, input_channels, height, width)`
'''
def __init__(self, config: MobileViTV2Config, embed_dim: int) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 1 | 24 | 3 | 16 | 5 | 1 | 0.58 | 1 | 5 | 2 | 0 | 2 | 4 | 2 | 12 | 61 | 9 | 33 | 12 | 30 | 19 | 17 | 12 | 14 | 1 | 1 | 0 | 2 |
| 3,945 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2MobileNetLayer |
from .configuration_mobilevitv2 import MobileViTV2Config
from torch import nn
import torch
class MobileViTV2MobileNetLayer(nn.Module):
def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int=1, num_stages: int=1) -> None:
super().__init__()
self.layer = nn.ModuleList()
for i in range(num_stages):
layer = MobileViTV2InvertedResidual(config, in_channels=in_channels, out_channels=out_channels, stride=stride if i == 0 else 1)
self.layer.append(layer)
in_channels = out_channels
def forward(self, features: torch.Tensor) -> torch.Tensor:
for layer_module in self.layer:
features = layer_module(features)
return features
|
class MobileViTV2MobileNetLayer(nn.Module):
def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int=1, num_stages: int=1) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 10 | 1 | 9 | 0 | 3 | 0 | 1 | 6 | 2 | 0 | 2 | 1 | 2 | 12 | 21 | 2 | 19 | 9 | 14 | 0 | 12 | 7 | 9 | 3 | 1 | 1 | 5 |
| 3,946 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2Model |
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, SemanticSegmenterOutput
import torch
from typing import Optional, Union
from .configuration_mobilevitv2 import MobileViTV2Config
from ...utils import auto_docstring, logging
@auto_docstring
class MobileViTV2Model(MobileViTV2PreTrainedModel):
def __init__(self, config: MobileViTV2Config, expand_output: bool=True):
"""
expand_output (`bool`, *optional*, defaults to `True`):
Whether to expand the output of the model. If `True`, the model will output pooled features in addition to
hidden states. If `False`, only the hidden states will be returned.
"""
super().__init__(config)
self.config = config
self.expand_output = expand_output
layer_0_dim = make_divisible(clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16)
self.conv_stem = MobileViTV2ConvLayer(config, in_channels=config.num_channels, out_channels=layer_0_dim, kernel_size=3, stride=2, use_normalization=True, use_activation=True)
self.encoder = MobileViTV2Encoder(config)
self.post_init()
def _prune_heads(self, heads_to_prune):
"""Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel
"""
for layer_index, heads in heads_to_prune.items():
mobilevitv2_layer = self.encoder.layer[layer_index]
if isinstance(mobilevitv2_layer, MobileViTV2Layer):
for transformer_layer in mobilevitv2_layer.transformer.layer:
transformer_layer.attention.prune_heads(heads)
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
embedding_output = self.conv_stem(pixel_values)
encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
if self.expand_output:
last_hidden_state = encoder_outputs[0]
pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False)
else:
last_hidden_state = encoder_outputs[0]
pooled_output = None
if not return_dict:
output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,)
return output + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
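A usage sketch. The checkpoint name is an assumption (substitute any MobileViTV2 checkpoint on the Hub); with the default `expand_output=True`, `pooler_output` is the spatial mean of the last hidden state, per the forward above:
```python
import torch
from transformers import AutoImageProcessor, MobileViTV2Model

# Checkpoint name is an assumption; substitute any MobileViTV2 checkpoint.
ckpt = "apple/mobilevitv2-1.0-imagenet1k-256"
processor = AutoImageProcessor.from_pretrained(ckpt)
model = MobileViTV2Model.from_pretrained(ckpt)

pixel_values = torch.randn(1, 3, 256, 256)  # stand-in for processor(images=...)["pixel_values"]
with torch.no_grad():
    outputs = model(pixel_values=pixel_values)
print(outputs.last_hidden_state.shape)  # (1, C, H', W') feature map
print(outputs.pooler_output.shape)      # (1, C): spatial mean over H', W'
```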
|
@auto_docstring
class MobileViTV2Model(MobileViTV2PreTrainedModel):
def __init__(self, config: MobileViTV2Config, expand_output: bool=True):
'''
expand_output (`bool`, *optional*, defaults to `True`):
Whether to expand the output of the model. If `True`, the model will output pooled features in addition to
hidden states. If `False`, only the hidden states will be returned.
'''
pass
def _prune_heads(self, heads_to_prune):
'''Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
pass
| 6 | 2 | 24 | 3 | 19 | 2 | 4 | 0.08 | 1 | 10 | 5 | 0 | 3 | 4 | 3 | 4 | 82 | 12 | 65 | 23 | 48 | 5 | 31 | 17 | 27 | 7 | 2 | 3 | 12 |
| 3,947 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2PreTrainedModel |
from ...modeling_utils import PreTrainedModel
from .configuration_mobilevitv2 import MobileViTV2Config
from ...utils import auto_docstring, logging
from torch import nn
@auto_docstring
class MobileViTV2PreTrainedModel(PreTrainedModel):
config: MobileViTV2Config
base_model_prefix = 'mobilevitv2'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = ['MobileViTV2Layer']
def _init_weights(self, module: nn.Module) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.GroupNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class MobileViTV2PreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module) -> None:
'''Initialize the weights'''
pass
| 3 | 1 | 11 | 0 | 8 | 3 | 4 | 0.5 | 1 | 0 | 0 | 3 | 1 | 0 | 1 | 1 | 23 | 2 | 14 | 7 | 12 | 7 | 13 | 7 | 11 | 4 | 1 | 2 | 4 |
| 3,948 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2Transformer |
from torch import nn
from .configuration_mobilevitv2 import MobileViTV2Config
import torch
class MobileViTV2Transformer(nn.Module):
def __init__(self, config: MobileViTV2Config, n_layers: int, d_model: int) -> None:
super().__init__()
ffn_multiplier = config.ffn_multiplier
ffn_dims = [ffn_multiplier * d_model] * n_layers
ffn_dims = [int(d // 16 * 16) for d in ffn_dims]
self.layer = nn.ModuleList()
for block_idx in range(n_layers):
transformer_layer = MobileViTV2TransformerLayer(config, embed_dim=d_model, ffn_latent_dim=ffn_dims[block_idx])
self.layer.append(transformer_layer)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
for layer_module in self.layer:
hidden_states = layer_module(hidden_states)
return hidden_states
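A worked example of the FFN width computation above: each width is `ffn_multiplier * d_model`, floored to a multiple of 16 by the list comprehension (the specific numbers are illustrative):
```python
# Worked example of the FFN width rounding (illustrative values).
ffn_multiplier, d_model, n_layers = 2.0, 100, 2
ffn_dims = [ffn_multiplier * d_model] * n_layers   # [200.0, 200.0]
ffn_dims = [int(d // 16 * 16) for d in ffn_dims]
print(ffn_dims)  # [192, 192] -- each width floored to a multiple of 16
```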
|
class MobileViTV2Transformer(nn.Module):
def __init__(self, config: MobileViTV2Config, n_layers: int, d_model: int) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 10 | 2 | 8 | 1 | 2 | 0.06 | 1 | 6 | 2 | 0 | 2 | 1 | 2 | 12 | 22 | 5 | 16 | 9 | 13 | 1 | 14 | 9 | 11 | 2 | 1 | 1 | 4 |
| 3,949 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | transformers.models.mobilevitv2.modeling_mobilevitv2.MobileViTV2TransformerLayer |
from torch import nn
import torch
from .configuration_mobilevitv2 import MobileViTV2Config
class MobileViTV2TransformerLayer(nn.Module):
def __init__(self, config: MobileViTV2Config, embed_dim: int, ffn_latent_dim: int, dropout: float=0.0) -> None:
super().__init__()
self.layernorm_before = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps)
self.attention = MobileViTV2LinearSelfAttention(config, embed_dim)
self.dropout1 = nn.Dropout(p=dropout)
self.layernorm_after = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps)
self.ffn = MobileViTV2FFN(config, embed_dim, ffn_latent_dim, config.ffn_dropout)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
layernorm_1_out = self.layernorm_before(hidden_states)
attention_output = self.attention(layernorm_1_out)
hidden_states = attention_output + hidden_states
layer_output = self.layernorm_after(hidden_states)
layer_output = self.ffn(layer_output)
layer_output = layer_output + hidden_states
return layer_output
|
class MobileViTV2TransformerLayer(nn.Module):
def __init__(self, config: MobileViTV2Config, embed_dim: int, ffn_latent_dim: int, dropout: float=0.0) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 12 | 1 | 11 | 0 | 1 | 0 | 1 | 7 | 3 | 0 | 2 | 5 | 2 | 12 | 25 | 3 | 22 | 17 | 13 | 0 | 16 | 11 | 13 | 1 | 1 | 0 | 2 |
| 3,950 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/configuration_modernbert.py | transformers.models.modernbert.configuration_modernbert.ModernBertConfig |
from ...configuration_utils import PretrainedConfig
from typing import Literal
class ModernBertConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`ModernBertModel`]. It is used to instantiate a ModernBert
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ModernBERT-base.
e.g. [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50368):
Vocabulary size of the ModernBert model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ModernBertModel`]
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1152):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 22):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. Will default to `"gelu"`
if not specified.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_cutoff_factor (`float`, *optional*, defaults to 2.0):
The cutoff factor for the truncated_normal_initializer for initializing all weight matrices.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
norm_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the normalization layers.
pad_token_id (`int`, *optional*, defaults to 50283):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 50282):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 50281):
Beginning of stream token id.
cls_token_id (`int`, *optional*, defaults to 50281):
Classification token id.
sep_token_id (`int`, *optional*, defaults to 50282):
Separation token id.
global_rope_theta (`float`, *optional*, defaults to 160000.0):
The base period of the global RoPE embeddings.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
global_attn_every_n_layers (`int`, *optional*, defaults to 3):
The number of layers between global attention layers.
local_attention (`int`, *optional*, defaults to 128):
The window size for local attention.
local_rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the local RoPE embeddings.
embedding_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the embeddings.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the MLP layers.
mlp_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the MLP layers.
decoder_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the decoder layers.
classifier_pooling (`str`, *optional*, defaults to `"cls"`):
The pooling method for the classifier. Should be either `"cls"` or `"mean"`. In local attention layers, the
CLS token doesn't attend to all tokens on long sequences.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the classifier.
classifier_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the classifier.
classifier_activation (`str`, *optional*, defaults to `"gelu"`):
The activation function for the classifier.
deterministic_flash_attn (`bool`, *optional*, defaults to `False`):
Whether to use deterministic flash attention. If `False`, inference will be faster but not deterministic.
sparse_prediction (`bool`, *optional*, defaults to `False`):
Whether to use sparse prediction for the masked language model instead of returning the full dense logits.
sparse_pred_ignore_index (`int`, *optional*, defaults to -100):
The index to ignore for the sparse prediction.
reference_compile (`bool`, *optional*):
Whether to compile the layers of the model which were compiled during pretraining. If `None`, then parts of
the model will be compiled if 1) `triton` is installed, 2) the model is not on MPS, 3) the model is not
shared between devices, and 4) the model is not resized after initialization. If `True`, then the model may
be faster in some scenarios.
repad_logits_with_grad (`bool`, *optional*, defaults to `False`):
When True, ModernBertForMaskedLM keeps track of the logits' gradient when repadding for output. This only
applies when using Flash Attention 2 with passed labels. Otherwise output logits always have a gradient.
Examples:
```python
>>> from transformers import ModernBertModel, ModernBertConfig
>>> # Initializing a ModernBert style configuration
>>> configuration = ModernBertConfig()
>>> # Initializing a model from the modernbert-base style configuration
>>> model = ModernBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'modernbert'
attribute_map = {'rope_theta': 'global_rope_theta'}
keys_to_ignore_at_inference = ['past_key_values']
def __init__(self, vocab_size=50368, hidden_size=768, intermediate_size=1152, num_hidden_layers=22, num_attention_heads=12, hidden_activation='gelu', max_position_embeddings=8192, initializer_range=0.02, initializer_cutoff_factor=2.0, norm_eps=1e-05, norm_bias=False, pad_token_id=50283, eos_token_id=50282, bos_token_id=50281, cls_token_id=50281, sep_token_id=50282, global_rope_theta=160000.0, attention_bias=False, attention_dropout=0.0, global_attn_every_n_layers=3, local_attention=128, local_rope_theta=10000.0, embedding_dropout=0.0, mlp_bias=False, mlp_dropout=0.0, decoder_bias=True, classifier_pooling: Literal['cls', 'mean']='cls', classifier_dropout=0.0, classifier_bias=False, classifier_activation='gelu', deterministic_flash_attn=False, sparse_prediction=False, sparse_pred_ignore_index=-100, reference_compile=None, repad_logits_with_grad=False, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, cls_token_id=cls_token_id, sep_token_id=sep_token_id, **kwargs)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.initializer_range = initializer_range
self.initializer_cutoff_factor = initializer_cutoff_factor
self.norm_eps = norm_eps
self.norm_bias = norm_bias
self.global_rope_theta = global_rope_theta
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.hidden_activation = hidden_activation
self.global_attn_every_n_layers = global_attn_every_n_layers
self.local_attention = local_attention
self.local_rope_theta = local_rope_theta
self.embedding_dropout = embedding_dropout
self.mlp_bias = mlp_bias
self.mlp_dropout = mlp_dropout
self.decoder_bias = decoder_bias
self.classifier_pooling = classifier_pooling
self.classifier_dropout = classifier_dropout
self.classifier_bias = classifier_bias
self.classifier_activation = classifier_activation
self.deterministic_flash_attn = deterministic_flash_attn
self.sparse_prediction = sparse_prediction
self.sparse_pred_ignore_index = sparse_pred_ignore_index
self.reference_compile = reference_compile
self.repad_logits_with_grad = repad_logits_with_grad
if self.classifier_pooling not in ['cls', 'mean']:
raise ValueError(f'Invalid value for `classifier_pooling`, should be either "cls" or "mean", but is {self.classifier_pooling}.')
def to_dict(self):
output = super().to_dict()
output.pop('reference_compile', None)
return output
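A quick check of the two behaviors defined above: `classifier_pooling` is validated at construction time, and `to_dict` drops `reference_compile` so it never ends up in a serialized config:
```python
from transformers import ModernBertConfig

config = ModernBertConfig(classifier_pooling="mean", reference_compile=False)
assert "reference_compile" not in config.to_dict()  # popped by to_dict()

try:
    ModernBertConfig(classifier_pooling="max")  # not "cls" or "mean"
except ValueError as err:
    print(err)  # Invalid value for `classifier_pooling` ...
```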
|
class ModernBertConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`ModernBertModel`]. It is used to instantiate a ModernBert
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ModernBERT-base.
e.g. [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50368):
Vocabulary size of the ModernBert model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ModernBertModel`]
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1152):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 22):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. Will default to `"gelu"`
if not specified.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_cutoff_factor (`float`, *optional*, defaults to 2.0):
The cutoff factor for the truncated_normal_initializer for initializing all weight matrices.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
norm_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the normalization layers.
pad_token_id (`int`, *optional*, defaults to 50283):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 50282):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 50281):
Beginning of stream token id.
cls_token_id (`int`, *optional*, defaults to 50281):
Classification token id.
sep_token_id (`int`, *optional*, defaults to 50282):
Separation token id.
global_rope_theta (`float`, *optional*, defaults to 160000.0):
The base period of the global RoPE embeddings.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
global_attn_every_n_layers (`int`, *optional*, defaults to 3):
The number of layers between global attention layers.
local_attention (`int`, *optional*, defaults to 128):
The window size for local attention.
local_rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the local RoPE embeddings.
embedding_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the embeddings.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the MLP layers.
mlp_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the MLP layers.
decoder_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the decoder layers.
classifier_pooling (`str`, *optional*, defaults to `"cls"`):
The pooling method for the classifier. Should be either `"cls"` or `"mean"`. In local attention layers, the
CLS token doesn't attend to all tokens on long sequences.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the classifier.
classifier_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the classifier.
classifier_activation (`str`, *optional*, defaults to `"gelu"`):
The activation function for the classifier.
deterministic_flash_attn (`bool`, *optional*, defaults to `False`):
Whether to use deterministic flash attention. If `False`, inference will be faster but not deterministic.
sparse_prediction (`bool`, *optional*, defaults to `False`):
Whether to use sparse prediction for the masked language model instead of returning the full dense logits.
sparse_pred_ignore_index (`int`, *optional*, defaults to -100):
The index to ignore for the sparse prediction.
reference_compile (`bool`, *optional*):
Whether to compile the layers of the model which were compiled during pretraining. If `None`, then parts of
the model will be compiled if 1) `triton` is installed, 2) the model is not on MPS, 3) the model is not
shared between devices, and 4) the model is not resized after initialization. If `True`, then the model may
be faster in some scenarios.
repad_logits_with_grad (`bool`, *optional*, defaults to `False`):
When True, ModernBertForMaskedLM keeps track of the logits' gradient when repadding for output. This only
applies when using Flash Attention 2 with passed labels. Otherwise output logits always have a gradient.
Examples:
```python
>>> from transformers import ModernBertModel, ModernBertConfig
>>> # Initializing a ModernBert style configuration
>>> configuration = ModernBertConfig()
>>> # Initializing a model from the modernbert-base style configuration
>>> model = ModernBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50368, hidden_size=768, intermediate_size=1152, num_hidden_layers=22, num_attention_heads=12, hidden_activation='gelu', max_position_embeddings=8192, initializer_range=0.02, initializer_cutoff_factor=2.0, norm_eps=1e-05, norm_bias=False, pad_token_id=50283, eos_token_id=50282, bos_token_id=50281, cls_token_id=50281, sep_token_id=50282, global_rope_theta=160000.0, attention_bias=False, attention_dropout=0.0, global_attn_every_n_layers=3, local_attention=128, local_rope_theta=10000.0, embedding_dropout=0.0, mlp_bias=False, mlp_dropout=0.0, decoder_bias=True, classifier_pooling: Literal['cls', 'mean']='cls', classifier_dropout=0.0, classifier_bias=False, classifier_activation='gelu', deterministic_flash_attn=False, sparse_prediction=False, sparse_pred_ignore_index=-100, reference_compile=None, repad_logits_with_grad=False, **kwargs):
pass
def to_dict(self):
pass
| 3 | 1 | 82 | 1 | 81 | 0 | 2 | 1.13 | 1 | 2 | 0 | 0 | 1 | 30 | 1 | 1 | 189 | 10 | 84 | 72 | 44 | 95 | 37 | 34 | 35 | 2 | 1 | 1 | 2 |
| 3,951 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py | transformers.models.modernbert.modeling_modernbert.ApplyRotaryEmbUnpad |
import torch
from typing import Optional, Union
import torch.nn.functional as F
class ApplyRotaryEmbUnpad(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cos, sin, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None):
qkv = qkv.contiguous()
total_nnz, _three, _nheads, headdim = qkv.shape
qk = qkv[:, :2].view(total_nnz, -1, headdim)
apply_rotary(qk, cos, sin, seqlen_offsets=0, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, interleaved=False, inplace=True)
ctx.save_for_backward(cos, sin, cu_seqlens)
ctx.max_seqlen = max_seqlen
return qkv
@staticmethod
def backward(ctx, do):
cos, sin, cu_seqlens = ctx.saved_tensors
do = do.contiguous()
total_nnz, _three, _nheads, headdim = do.shape
dqk = do[:, :2].view(total_nnz, -1, headdim)
apply_rotary(dqk, cos, sin, seqlen_offsets=0, cu_seqlens=cu_seqlens, max_seqlen=ctx.max_seqlen, interleaved=False, inplace=True, conjugate=True)
return (do, None, None, None, None, None, None)
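An invocation sketch, with shapes following the `forward` above: qkv is unpadded, so the first dimension is the total token count across the batch and `cu_seqlens` marks sequence boundaries. The underlying `apply_rotary` kernel comes from the flash-attn package, so this only runs on CUDA with flash-attn installed; all tensor values here are illustrative:
```python
import torch

total_nnz, nheads, headdim = 10, 12, 64
qkv = torch.randn(total_nnz, 3, nheads, headdim, device="cuda", dtype=torch.float16)
cos = torch.randn(7, headdim // 2, device="cuda", dtype=torch.float16)
sin = torch.randn(7, headdim // 2, device="cuda", dtype=torch.float16)
cu_seqlens = torch.tensor([0, 4, 10], device="cuda", dtype=torch.int32)  # two sequences

# Positional args: qkv, cos, sin, cu_seqlens, max_seqlen.
rotated_qkv = ApplyRotaryEmbUnpad.apply(qkv, cos, sin, cu_seqlens, 7)
```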
|
class ApplyRotaryEmbUnpad(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cos, sin, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None):
pass
@staticmethod
def backward(ctx, do):
pass
| 5 | 0 | 25 | 1 | 21 | 3 | 1 | 0.14 | 1 | 2 | 0 | 0 | 0 | 0 | 2 | 32 | 53 | 3 | 44 | 17 | 32 | 6 | 16 | 8 | 13 | 1 | 5 | 0 | 2 |
| 3,952 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py | transformers.models.modernbert.modeling_modernbert.ModernBertAttention |
from torch import nn
from .configuration_modernbert import ModernBertConfig
import torch.nn.functional as F
import copy
import torch
from typing import Optional, Union
class ModernBertAttention(nn.Module):
"""Performs multi-headed self attention on a batch of unpadded sequences.
If Flash Attention 2 is installed, this module uses Flash Attention to improve throughput.
If Flash Attention 2 is not installed, the implementation will use PyTorch's SDPA kernel,
which requires padding and unpadding inputs, adding some overhead.
See `forward` method for additional details.
"""
def __init__(self, config: ModernBertConfig, layer_id: Optional[int]=None):
super().__init__()
self.config = config
self.layer_id = layer_id
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.attention_dropout = config.attention_dropout
self.deterministic_flash_attn = config.deterministic_flash_attn
self.num_heads = config.num_attention_heads
self.head_dim = config.hidden_size // config.num_attention_heads
self.all_head_size = self.head_dim * self.num_heads
self.Wqkv = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=config.attention_bias)
if layer_id % config.global_attn_every_n_layers != 0:
self.local_attention = (config.local_attention // 2, config.local_attention // 2)
rope_theta = config.local_rope_theta if config.local_rope_theta is not None else config.global_rope_theta
max_position_embeddings = config.local_attention
else:
self.local_attention = (-1, -1)
max_position_embeddings = config.max_position_embeddings
rope_theta = config.global_rope_theta
if config._attn_implementation == 'flash_attention_2':
self.rotary_emb = ModernBertUnpaddedRotaryEmbedding(dim=self.head_dim, max_seqlen=max_position_embeddings, base=rope_theta)
else:
config_copy = copy.deepcopy(config)
config_copy.rope_theta = rope_theta
self.rotary_emb = ModernBertRotaryEmbedding(config=config_copy)
self.Wo = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias)
self.out_drop = nn.Dropout(config.attention_dropout) if config.attention_dropout > 0.0 else nn.Identity()
self.pruned_heads = set()
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False, **kwargs) -> torch.Tensor:
qkv = self.Wqkv(hidden_states)
bs = hidden_states.shape[0]
if self.config._attn_implementation == 'flash_attention_2':
qkv = qkv.view(-1, 3, self.num_heads, self.head_dim)
else:
qkv = qkv.view(bs, -1, 3, self.num_heads, self.head_dim)
attn_outputs = MODERNBERT_ATTENTION_FUNCTION[self.config._attn_implementation](self, qkv=qkv, rotary_emb=self.rotary_emb, local_attention=self.local_attention, bs=bs, dim=self.all_head_size, output_attentions=output_attentions, **kwargs)
hidden_states = attn_outputs[0]
hidden_states = self.out_drop(self.Wo(hidden_states))
return (hidden_states,) + attn_outputs[1:]
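A worked example of the layer schedule implied by `__init__` above: layers where `layer_id % global_attn_every_n_layers == 0` attend globally (window `(-1, -1)`), and the rest use a sliding window of `local_attention // 2` tokens on each side:
```python
# Layer schedule with the config defaults used above.
global_attn_every_n_layers, local_attention = 3, 128
for layer_id in range(7):
    if layer_id % global_attn_every_n_layers == 0:
        print(layer_id, "global", (-1, -1))
    else:
        print(layer_id, "local", (local_attention // 2, local_attention // 2))
# 0 global (-1, -1)
# 1 local (64, 64)
# 2 local (64, 64)
# 3 global (-1, -1) ...
```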
|
class ModernBertAttention(nn.Module):
'''Performs multi-headed self attention on a batch of unpadded sequences.
If Flash Attention 2 is installed, this module uses Flash Attention to improve throughput.
If Flash Attention 2 is not installed, the implementation will use PyTorch's SDPA kernel,
which requires padding and unpadding inputs, adding some overhead.
See `forward` method for additional details.
'''
def __init__(self, config: ModernBertConfig, layer_id: Optional[int]=None):
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False, **kwargs) -> torch.Tensor:
pass
| 3 | 1 | 34 | 5 | 29 | 1 | 5 | 0.12 | 1 | 9 | 3 | 0 | 2 | 13 | 2 | 12 | 78 | 13 | 59 | 26 | 51 | 7 | 38 | 21 | 35 | 7 | 1 | 2 | 9 |
| 3,953 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py | transformers.models.modernbert.modeling_modernbert.ModernBertEmbeddings |
from torch import nn
from .configuration_modernbert import ModernBertConfig
import torch.nn.functional as F
import torch
from typing import Optional, Union
class ModernBertEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config: ModernBertConfig):
super().__init__()
self.config = config
self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
self.drop = nn.Dropout(config.embedding_dropout)
@torch.compile(dynamic=True)
def compiled_embeddings(self, input_ids: torch.LongTensor) -> torch.Tensor:
return self.drop(self.norm(self.tok_embeddings(input_ids)))
def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.Tensor]=None) -> torch.Tensor:
if inputs_embeds is not None:
hidden_states = self.drop(self.norm(inputs_embeds))
else:
hidden_states = self.compiled_embeddings(input_ids) if self.config.reference_compile else self.drop(self.norm(self.tok_embeddings(input_ids)))
return hidden_states
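Both branches above compute `drop(norm(tok_embeddings(input_ids)))`; the compiled path only wraps that expression in `torch.compile`. An eager equivalent with illustrative sizes:
```python
import torch
from torch import nn

tok_embeddings = nn.Embedding(100, 16, padding_idx=0)
norm = nn.LayerNorm(16)
drop = nn.Dropout(0.0)

input_ids = torch.tensor([[1, 2, 3]])
print(drop(norm(tok_embeddings(input_ids))).shape)  # torch.Size([1, 3, 16])
```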
|
class ModernBertEmbeddings(nn.Module):
'''
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
'''
def __init__(self, config: ModernBertConfig):
pass
@torch.compile(dynamic=True)
def compiled_embeddings(self, input_ids: torch.LongTensor) -> torch.Tensor:
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 5 | 1 | 7 | 0 | 7 | 0 | 2 | 0.14 | 1 | 3 | 1 | 0 | 3 | 4 | 3 | 13 | 28 | 3 | 22 | 12 | 15 | 3 | 14 | 9 | 10 | 3 | 1 | 1 | 5 |
| 3,954 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py | transformers.models.modernbert.modeling_modernbert.ModernBertEncoderLayer |
from typing import Optional, Union
import torch
import torch.nn.functional as F
from .configuration_modernbert import ModernBertConfig
from ...modeling_layers import GradientCheckpointingLayer
from torch import nn
class ModernBertEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: ModernBertConfig, layer_id: Optional[int]=None):
super().__init__()
self.config = config
if layer_id == 0:
self.attn_norm = nn.Identity()
else:
self.attn_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
self.attn = ModernBertAttention(config=config, layer_id=layer_id)
self.mlp_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
self.mlp = ModernBertMLP(config)
@torch.compile(dynamic=True)
def compiled_mlp(self, hidden_states: torch.Tensor) -> torch.Tensor:
return self.mlp(self.mlp_norm(hidden_states))
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, output_attentions: Optional[bool]=False) -> torch.Tensor:
attn_outputs = self.attn(self.attn_norm(hidden_states), attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, output_attentions=output_attentions)
hidden_states = hidden_states + attn_outputs[0]
mlp_output = self.compiled_mlp(hidden_states) if self.config.reference_compile else self.mlp(self.mlp_norm(hidden_states))
hidden_states = hidden_states + mlp_output
return (hidden_states,) + attn_outputs[1:]
|
class ModernBertEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: ModernBertConfig, layer_id: Optional[int]=None):
pass
@torch.compile(dynamic=True)
def compiled_mlp(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, output_attentions: Optional[bool]=False) -> torch.Tensor:
pass
| 5 | 0 | 13 | 0 | 13 | 0 | 2 | 0.02 | 1 | 7 | 3 | 0 | 3 | 5 | 3 | 13 | 44 | 3 | 41 | 21 | 27 | 1 | 18 | 11 | 14 | 2 | 1 | 1 | 5 |
| 3,955 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py | transformers.models.modernbert.modeling_modernbert.ModernBertForMaskedLM |
from ...utils import auto_docstring, is_flash_attn_2_available, logging
from contextlib import nullcontext
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
from .configuration_modernbert import ModernBertConfig
import torch.nn.functional as F
import torch
from typing import Optional, Union
@auto_docstring(custom_intro='\n The ModernBert Model with a decoder head on top that is used for masked language modeling.\n ')
class ModernBertForMaskedLM(ModernBertPreTrainedModel):
_tied_weights_keys = ['decoder.weight']
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.config = config
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=config.decoder_bias)
self.sparse_prediction = self.config.sparse_prediction
self.sparse_pred_ignore_index = self.config.sparse_pred_ignore_index
self.post_init()
def get_output_embeddings(self):
return self.decoder
def set_output_embeddings(self, new_embeddings: nn.Linear):
self.decoder = new_embeddings
@torch.compile(dynamic=True)
def compiled_head(self, output: torch.Tensor) -> torch.Tensor:
return self.decoder(self.head(output))
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
"""
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
self._maybe_set_compile()
if self.config._attn_implementation == 'flash_attention_2':
if indices is None and cu_seqlens is None and (max_seqlen is None):
if batch_size is None and seq_len is None:
if inputs_embeds is not None:
batch_size, seq_len = inputs_embeds.shape[:2]
else:
batch_size, seq_len = input_ids.shape[:2]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool)
if inputs_embeds is None:
with torch.no_grad():
input_ids, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input(inputs=input_ids, attention_mask=attention_mask, position_ids=position_ids, labels=labels)
else:
inputs_embeds, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input(inputs=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, labels=labels)
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = outputs[0]
if self.sparse_prediction and labels is not None:
labels = labels.view(-1)
last_hidden_state = last_hidden_state.view(labels.shape[0], -1)
mask_tokens = labels != self.sparse_pred_ignore_index
last_hidden_state = last_hidden_state[mask_tokens]
labels = labels[mask_tokens]
logits = self.compiled_head(last_hidden_state) if self.config.reference_compile else self.decoder(self.head(last_hidden_state))
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs)
if self.config._attn_implementation == 'flash_attention_2':
with nullcontext() if self.config.repad_logits_with_grad or labels is None else torch.no_grad():
logits = _pad_modernbert_output(inputs=logits, indices=indices, batch=batch_size, seqlen=seq_len)
if not return_dict:
output = (logits,)
return (loss,) + output if loss is not None else output
return MaskedLMOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
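A sketch of the sparse-prediction branch above: labels are flattened, only the non-ignored (masked) positions are kept, and the decoder then runs on just those hidden states instead of the full dense logits (illustrative tensors):
```python
import torch

sparse_pred_ignore_index = -100
labels = torch.tensor([[-100, 5, -100], [7, -100, -100]])        # (batch, seq)
last_hidden_state = torch.randn(2, 3, 8)                         # (batch, seq, hidden)

labels = labels.view(-1)                                         # (6,)
last_hidden_state = last_hidden_state.view(labels.shape[0], -1)  # (6, 8)
mask_tokens = labels != sparse_pred_ignore_index
print(last_hidden_state[mask_tokens].shape)  # torch.Size([2, 8])
print(labels[mask_tokens])                   # tensor([5, 7])
```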
|
@auto_docstring(custom_intro='\n The ModernBert Model with a decoder head on top that is used for masked language modeling.\n ')
class ModernBertForMaskedLM(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings: nn.Linear):
pass
@torch.compile(dynamic=True)
def compiled_head(self, output: torch.Tensor) -> torch.Tensor:
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
'''
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
'''
pass
| 9 | 1 | 22 | 3 | 19 | 1 | 4 | 0.03 | 1 | 9 | 4 | 0 | 5 | 6 | 5 | 9 | 126 | 18 | 105 | 39 | 75 | 3 | 52 | 20 | 46 | 16 | 2 | 4 | 20 |
| 3,956 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py | transformers.models.modernbert.modeling_modernbert.ModernBertForSequenceClassification |
from typing import Optional, Union
from ...utils import auto_docstring, is_flash_attn_2_available, logging
import torch
import torch.nn.functional as F
from .configuration_modernbert import ModernBertConfig
from torch import nn
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@auto_docstring(custom_intro='\n The ModernBert Model with a sequence classification head on top that performs pooling.\n ')
class ModernBertForSequenceClassification(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
self.drop = torch.nn.Dropout(config.classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
"""
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
self._maybe_set_compile()
if input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
if batch_size is None and seq_len is None:
if inputs_embeds is not None:
batch_size, seq_len = inputs_embeds.shape[:2]
else:
batch_size, seq_len = input_ids.shape[:2]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool)
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = outputs[0]
if self.config.classifier_pooling == 'cls':
last_hidden_state = last_hidden_state[:, 0]
elif self.config.classifier_pooling == 'mean':
last_hidden_state = (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum(dim=1, keepdim=True)
pooled_output = self.head(last_hidden_state)
pooled_output = self.drop(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,)
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
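The two pooling modes above on a toy batch: `"cls"` takes the first token's hidden state, while `"mean"` averages over non-padding tokens using the attention mask:
```python
import torch

last_hidden_state = torch.arange(12.0).reshape(1, 4, 3)   # (batch=1, seq=4, hidden=3)
attention_mask = torch.tensor([[1, 1, 1, 0]])             # last token is padding

cls_pooled = last_hidden_state[:, 0]
mean_pooled = (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum(
    dim=1, keepdim=True
)
print(cls_pooled)   # tensor([[0., 1., 2.]])
print(mean_pooled)  # tensor([[3., 4., 5.]]) -- mean of the three real tokens
```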
|
@auto_docstring(custom_intro='\n The ModernBert Model with a sequence classification head on top that performs pooling.\n ')
class ModernBertForSequenceClassification(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
'''
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
'''
pass
| 5 | 1 | 50 | 5 | 42 | 4 | 8 | 0.08 | 1 | 8 | 4 | 0 | 2 | 6 | 2 | 6 | 108 | 10 | 91 | 34 | 65 | 7 | 41 | 16 | 38 | 14 | 2 | 3 | 15 |
| 3,957 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py | transformers.models.modernbert.modeling_modernbert.ModernBertForTokenClassification |
import torch.nn.functional as F
import torch
from typing import Optional, Union
from ...utils import auto_docstring, is_flash_attn_2_available, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
from .configuration_modernbert import ModernBertConfig
@auto_docstring(custom_intro='\n The ModernBert Model with a token classification head on top, e.g. for Named Entity Recognition (NER) tasks.\n ')
class ModernBertForTokenClassification(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
self.drop = torch.nn.Dropout(config.classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
"""
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
self._maybe_set_compile()
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = outputs[0]
last_hidden_state = self.head(last_hidden_state)
last_hidden_state = self.drop(last_hidden_state)
logits = self.classifier(last_hidden_state)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
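A minimal usage sketch for this class; the checkpoint name, label count, and example sentence are illustrative assumptions, not taken from the code above.
```python
# Hedged usage sketch: the checkpoint and num_labels are assumptions.
import torch
from transformers import AutoTokenizer, ModernBertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
model = ModernBertForTokenClassification.from_pretrained(
    "answerdotai/ModernBERT-base", num_labels=5
)

inputs = tokenizer("Paris is the capital of France", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch_size, seq_len, num_labels)
predicted_class_ids = logits.argmax(dim=-1)  # per-token class ids
```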
|
@auto_docstring(custom_intro='\n The ModernBert Model with a token classification head on top, e.g. for Named Entity Recognition (NER) tasks.\n ')
class ModernBertForTokenClassification(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
'''
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
'''
pass
| 5
| 1
| 36
| 4
| 30
| 3
| 3
| 0.08
| 1
| 8
| 4
| 0
| 2
| 5
| 2
| 6
| 79
| 8
| 66
| 31
| 41
| 5
| 25
| 14
| 22
| 5
| 2
| 1
| 6
|
3,958
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py
|
transformers.models.modernbert.modeling_modernbert.ModernBertMLP
|
from torch import nn
from .configuration_modernbert import ModernBertConfig
import torch.nn.functional as F
import torch
from ...activations import ACT2FN
class ModernBertMLP(nn.Module):
"""Applies the GLU at the end of each ModernBERT layer.
Compared to the default BERT architecture, this block replaces :class:`~transformers.models.bert.modeling_bert.BertIntermediate`
and :class:`~transformers.models.bert.modeling_bert.SelfOutput` with a single module that has similar functionality.
"""
def __init__(self, config: ModernBertConfig):
super().__init__()
self.config = config
self.Wi = nn.Linear(config.hidden_size, int(config.intermediate_size) * 2, bias=config.mlp_bias)
self.act = ACT2FN[config.hidden_activation]
self.drop = nn.Dropout(config.mlp_dropout)
self.Wo = nn.Linear(config.intermediate_size, config.hidden_size, bias=config.mlp_bias)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
input, gate = self.Wi(hidden_states).chunk(2, dim=-1)
return self.Wo(self.drop(self.act(input) * gate))
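A self-contained numeric sketch of the fused GLU pattern above; the dimensions are illustrative placeholders rather than real ModernBERT sizes.
```python
import torch
from torch import nn

hidden_size, intermediate_size = 8, 16
Wi = nn.Linear(hidden_size, intermediate_size * 2, bias=False)  # fused up-projection
Wo = nn.Linear(intermediate_size, hidden_size, bias=False)      # down-projection
act = nn.GELU()

x = torch.randn(2, 4, hidden_size)   # (batch, seq, hidden)
inp, gate = Wi(x).chunk(2, dim=-1)   # split the doubled projection in two
out = Wo(act(inp) * gate)            # GLU: activation(input) * gate
print(out.shape)                     # torch.Size([2, 4, 8])
```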
|
class ModernBertMLP(nn.Module):
'''Applies the GLU at the end of each ModernBERT layer.
Compared to the default BERT architecture, this block replaces :class:`~transformers.models.bert.modeling_bert.BertIntermediate`
and :class:`~transformers.models.bert.modeling_bert.SelfOutput` with a single module that has similar functionality.
'''
def __init__(self, config: ModernBertConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 5
| 0
| 5
| 0
| 1
| 0.36
| 1
| 4
| 1
| 0
| 2
| 5
| 2
| 12
| 18
| 3
| 11
| 9
| 8
| 4
| 11
| 9
| 8
| 1
| 1
| 0
| 2
|
3,959
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py
|
transformers.models.modernbert.modeling_modernbert.ModernBertModel
|
import torch.nn.functional as F
from .configuration_modernbert import ModernBertConfig
from torch import nn
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
from ...utils import auto_docstring, is_flash_attn_2_available, logging
import torch
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
@auto_docstring
class ModernBertModel(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.config = config
self.embeddings = ModernBertEmbeddings(config)
self.layers = nn.ModuleList([ModernBertEncoderLayer(config, layer_id) for layer_id in range(config.num_hidden_layers)])
self.final_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
self.gradient_checkpointing = False
self.post_init()
def get_input_embeddings(self):
return self.embeddings.tok_embeddings
def set_input_embeddings(self, value):
self.embeddings.tok_embeddings = value
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor, ...], BaseModelOutput]:
"""
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
self._maybe_set_compile()
if input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
if batch_size is None and seq_len is None:
if inputs_embeds is not None:
batch_size, seq_len = inputs_embeds.shape[:2]
else:
batch_size, seq_len = input_ids.shape[:2]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool)
repad = False
if self.config._attn_implementation == 'flash_attention_2':
if indices is None and cu_seqlens is None and (max_seqlen is None):
repad = True
if inputs_embeds is None:
with torch.no_grad():
input_ids, indices, cu_seqlens, max_seqlen, *_ = _unpad_modernbert_input(inputs=input_ids, attention_mask=attention_mask)
else:
inputs_embeds, indices, cu_seqlens, max_seqlen, *_ = _unpad_modernbert_input(inputs=inputs_embeds, attention_mask=attention_mask)
else:
if position_ids is None:
position_ids = torch.arange(seq_len, device=device).unsqueeze(0)
attention_mask, sliding_window_mask = self._update_attention_mask(attention_mask, output_attentions=output_attentions)
hidden_states = self.embeddings(input_ids=input_ids, inputs_embeds=inputs_embeds)
for encoder_layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = encoder_layer(hidden_states, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions and len(layer_outputs) > 1:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states = self.final_norm(hidden_states)
if repad:
hidden_states = _pad_modernbert_output(inputs=hidden_states, indices=indices, batch=batch_size, seqlen=seq_len)
if all_hidden_states is not None:
all_hidden_states = tuple((_pad_modernbert_output(inputs=hs, indices=indices, batch=batch_size, seqlen=seq_len) for hs in all_hidden_states))
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
def _update_attention_mask(self, attention_mask: torch.Tensor, output_attentions: bool) -> tuple[torch.Tensor, torch.Tensor]:
if output_attentions:
if self.config._attn_implementation == 'sdpa':
logger.warning_once('Outputting attentions is only supported with the \'eager\' attention implementation, not with "sdpa". Falling back to `attn_implementation="eager"`.')
self.config._attn_implementation = 'eager'
elif self.config._attn_implementation != 'eager':
logger.warning_once(f'Outputting attentions is only supported with the eager attention implementation, not with {self.config._attn_implementation}. Consider setting `attn_implementation="eager"`. Setting `output_attentions=False`.')
global_attention_mask = _prepare_4d_attention_mask(attention_mask, self.dtype)
rows = torch.arange(global_attention_mask.shape[2]).unsqueeze(0)
distance = torch.abs(rows - rows.T)
window_mask = (distance <= self.config.local_attention // 2).unsqueeze(0).unsqueeze(0).to(attention_mask.device)
sliding_window_mask = global_attention_mask.masked_fill(window_mask.logical_not(), torch.finfo(self.dtype).min)
return (global_attention_mask, sliding_window_mask)
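A toy illustration of the unpadded layout that `indices` and `cu_seqlens` describe. The `_unpad_modernbert_input` helper itself is defined elsewhere in this file; the bookkeeping below only mirrors its outputs for a made-up two-sequence batch.
```python
import torch
import torch.nn.functional as F

attention_mask = torch.tensor([[1, 1, 1, 0],   # sequence of length 3
                               [1, 1, 0, 0]])  # sequence of length 2
seqlens = attention_mask.sum(dim=-1)           # tensor([3, 2])
cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0), (1, 0))  # tensor([0, 3, 5])
indices = attention_mask.flatten().nonzero().squeeze(-1)  # non-pad positions
max_seqlen = int(seqlens.max())                # 3

# Tokens of batch element b occupy rows cu_seqlens[b]:cu_seqlens[b + 1]
# of the flattened (total_unpadded_tokens, hidden_size) tensor.
```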
|
@auto_docstring
class ModernBertModel(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor, ...], BaseModelOutput]:
'''
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
'''
pass
def _update_attention_mask(self, attention_mask: torch.Tensor, output_attentions: bool) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 8
| 1
| 32
| 4
| 28
| 1
| 6
| 0.03
| 1
| 11
| 4
| 0
| 5
| 5
| 5
| 9
| 172
| 23
| 145
| 39
| 118
| 4
| 74
| 23
| 68
| 24
| 2
| 4
| 31
|
3,960
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py
|
transformers.models.modernbert.modeling_modernbert.ModernBertPreTrainedModel
|
from ...utils import auto_docstring, is_flash_attn_2_available, logging
from ...modeling_utils import PreTrainedModel
from torch import nn
import math
from .configuration_modernbert import ModernBertConfig
from ...utils.import_utils import is_triton_available
from typing import Optional, Union
@auto_docstring
class ModernBertPreTrainedModel(PreTrainedModel):
config: ModernBertConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['ModernBertEmbeddings', 'ModernBertEncoderLayer']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = False
def _init_weights(self, module: nn.Module):
cutoff_factor = self.config.initializer_cutoff_factor
if cutoff_factor is None:
cutoff_factor = 3
def init_weight(module: nn.Module, std: float):
nn.init.trunc_normal_(module.weight, mean=0.0, std=std, a=-cutoff_factor * std, b=cutoff_factor * std)
if isinstance(module, nn.Linear):
if module.bias is not None:
nn.init.zeros_(module.bias)
stds = {'in': self.config.initializer_range, 'out': self.config.initializer_range / math.sqrt(2.0 * self.config.num_hidden_layers), 'embedding': self.config.initializer_range, 'final_out': self.config.hidden_size ** (-0.5)}
if isinstance(module, ModernBertEmbeddings):
init_weight(module.tok_embeddings, stds['embedding'])
elif isinstance(module, ModernBertMLP):
init_weight(module.Wi, stds['in'])
init_weight(module.Wo, stds['out'])
elif isinstance(module, ModernBertAttention):
init_weight(module.Wqkv, stds['in'])
init_weight(module.Wo, stds['out'])
elif isinstance(module, ModernBertPredictionHead):
init_weight(module.dense, stds['out'])
elif isinstance(module, ModernBertForMaskedLM):
init_weight(module.decoder, stds['out'])
elif isinstance(module, (ModernBertForSequenceClassification, ModernBertForMultipleChoice, ModernBertForTokenClassification, ModernBertForQuestionAnswering)):
init_weight(module.classifier, stds['final_out'])
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
if module.bias is not None:
module.bias.data.zero_()
def _check_and_adjust_attn_implementation(self, attn_implementation: Optional[str], is_init_check: bool=False) -> str:
"""
Checks and dispatches to the requested attention implementation.
"""
try:
attn_implementation = 'flash_attention_2' if attn_implementation is None and self._flash_attn_2_can_dispatch() else attn_implementation
except (ValueError, ImportError):
pass
return super()._check_and_adjust_attn_implementation(attn_implementation=attn_implementation, is_init_check=is_init_check)
def _maybe_set_compile(self):
if self.config.reference_compile is False:
return
if hasattr(self, 'hf_device_map') and len(self.hf_device_map) > 1:
if self.config.reference_compile:
logger.warning_once('If `accelerate` split the model across devices, `torch.compile` will not work. Falling back to non-compiled mode.')
self.config.reference_compile = False
if self.device.type == 'mps':
if self.config.reference_compile:
logger.warning_once('Compiling the model with `torch.compile` and using a `torch.mps` device is not supported. Falling back to non-compiled mode.')
self.config.reference_compile = False
if self.device.type == 'cpu':
if self.config.reference_compile:
logger.warning_once('Compiling the model with `torch.compile` and using a `torch.cpu` device is not supported. Falling back to non-compiled mode.')
self.config.reference_compile = False
if self.config.reference_compile is None:
self.config.reference_compile = is_triton_available()
def resize_token_embeddings(self, *args, **kwargs):
model_embeds = super().resize_token_embeddings(*args, **kwargs)
if self.config.reference_compile in {True, None}:
if self.config.reference_compile:
logger.warning_once('Resizing token embeddings with `torch.compile` is not supported. Falling back to non-compiled mode.')
self.config.reference_compile = False
return model_embeds
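A minimal sketch of the truncated-normal initialization that `_init_weights` applies; the `std` and `cutoff_factor` values are illustrative placeholders.
```python
import torch
from torch import nn

std, cutoff_factor = 0.02, 2.0
linear = nn.Linear(768, 768)
nn.init.trunc_normal_(linear.weight, mean=0.0, std=std,
                      a=-cutoff_factor * std, b=cutoff_factor * std)
nn.init.zeros_(linear.bias)
# Every weight now lies within cutoff_factor standard deviations of 0.
assert linear.weight.abs().max() <= cutoff_factor * std
```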
|
@auto_docstring
class ModernBertPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
pass
def init_weight(module: nn.Module, std: float):
pass
def _check_and_adjust_attn_implementation(self, attn_implementation: Optional[str], is_init_check: bool=False) -> str:
'''
Checks and dispatches to the requested attention implementation.
'''
pass
def _maybe_set_compile(self):
pass
def resize_token_embeddings(self, *args, **kwargs):
pass
| 7
| 1
| 25
| 2
| 22
| 1
| 5
| 0.04
| 1
| 14
| 7
| 4
| 3
| 0
| 4
| 4
| 124
| 14
| 106
| 24
| 92
| 4
| 59
| 16
| 53
| 9
| 1
| 2
| 26
|
3,961
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py
|
transformers.models.modernbert.modeling_modernbert.ModernBertPredictionHead
|
import torch.nn.functional as F
import torch
from ...activations import ACT2FN
from torch import nn
from .configuration_modernbert import ModernBertConfig
class ModernBertPredictionHead(nn.Module):
def __init__(self, config: ModernBertConfig):
super().__init__()
self.config = config
self.dense = nn.Linear(config.hidden_size, config.hidden_size, config.classifier_bias)
self.act = ACT2FN[config.classifier_activation]
self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return self.norm(self.act(self.dense(hidden_states)))
|
class ModernBertPredictionHead(nn.Module):
def __init__(self, config: ModernBertConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 2
| 4
| 2
| 12
| 10
| 1
| 9
| 7
| 6
| 0
| 9
| 7
| 6
| 1
| 1
| 0
| 2
|
3,962
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py
|
transformers.models.modernbert.modeling_modernbert.ModernBertRotaryEmbedding
|
from torch import nn
from .configuration_modernbert import ModernBertConfig
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
import torch.nn.functional as F
import torch
class ModernBertRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: ModernBertConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
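A minimal sketch of the cos/sin table construction in `forward`, following the default RoPE parameterization; `dim`, `base`, and `seq_len` are illustrative, and `attention_scaling` is omitted for brevity.
```python
import torch

dim, base, seq_len = 8, 10000.0, 6
inv_freq = 1.0 / base ** (torch.arange(0, dim, 2).float() / dim)  # (dim // 2,)
positions = torch.arange(seq_len).float()                          # (seq_len,)
freqs = torch.outer(positions, inv_freq)                           # (seq_len, dim // 2)
emb = torch.cat((freqs, freqs), dim=-1)                            # (seq_len, dim)
cos, sin = emb.cos(), emb.sin()
print(cos.shape, sin.shape)  # torch.Size([6, 8]) torch.Size([6, 8])
```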
|
class ModernBertRotaryEmbedding(nn.Module):
def __init__(self, config: ModernBertConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5
| 0
| 18
| 2
| 13
| 5
| 3
| 0.35
| 1
| 6
| 1
| 0
| 3
| 7
| 3
| 13
| 58
| 7
| 40
| 21
| 35
| 14
| 38
| 20
| 34
| 3
| 1
| 1
| 8
|
3,963
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modeling_modernbert.py
|
transformers.models.modernbert.modeling_modernbert.ModernBertUnpaddedRotaryEmbedding
|
import torch
from typing import Optional, Union
import torch.nn.functional as F
class ModernBertUnpaddedRotaryEmbedding(RotaryEmbedding):
"""
The rotary position embeddings applied directly to unpadded sequences.
"""
def __init__(self, dim: int, base: float=10000.0, max_seqlen: Optional[int]=None, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None):
"""
max_seqlen: if max_seqlen, device, and dtype are provided, we precompute the cos_sin_cache
up to max_seqlen. If the max_seqlen, device, or dtype during training/inference differ,
the cos_sin_cache will be recomputed during the forward pass.
"""
super().__init__(dim=dim, base=base, device=device, interleaved=False)
self.max_seqlen = max_seqlen
if max_seqlen is not None and device is not None and (dtype is not None):
self._update_cos_sin_cache(max_seqlen, device=device, dtype=dtype)
def forward(self, qkv: torch.Tensor, cu_seqlens: torch.Tensor, max_seqlen: Optional[int]=None) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
"""
Apply rotary embedding *inplace* to qkv.
qkv: (total_nnz, 3, nheads, headdim)
cu_seqlens: (batch + 1,) cumulative sequence lengths
max_seqlen: int max seq length in the batch
"""
if max_seqlen is not None:
self._update_cos_sin_cache(max_seqlen, device=qkv.device, dtype=qkv.dtype)
qkv = apply_rotary_unpadded(qkv, self._cos_cached, self._sin_cached, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen)
return qkv
def extra_repr(self) -> str:
return f'dim={self.dim}, base={self.base}, scale_base={self.scale_base}'
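A shape-level sketch of the packed, unpadded `qkv` layout this module rotates in place; the sizes below are made up.
```python
import torch

total_nnz, nheads, headdim = 5, 12, 64            # 5 non-padding tokens in the batch
qkv = torch.randn(total_nnz, 3, nheads, headdim)  # q, k, v packed along dim 1
q, k, v = qkv.unbind(dim=1)                       # each (total_nnz, nheads, headdim)
cu_seqlens = torch.tensor([0, 3, 5], dtype=torch.int32)  # two sequences: 3 + 2 tokens
max_seqlen = 3
```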
|
class ModernBertUnpaddedRotaryEmbedding(RotaryEmbedding):
'''
The rotary position embeddings applied directly to unpadded sequences.
'''
def __init__(self, dim: int, base: float=10000.0, max_seqlen: Optional[int]=None, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None):
'''
max_seqlen: if max_seqlen, device, and dtype are provided, we precompute the cos_sin_cache
up to max_seqlen. If the max_seqlen, device, or dtype during training/inference differ,
the cos_sin_cache will be recomputed during the forward pass.
'''
pass
def forward(self, qkv: torch.Tensor, cu_seqlens: torch.Tensor, max_seqlen: Optional[int]=None) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
'''
Apply rotary embedding *inplace* to qkv.
qkv: (total_nnz, 3, nheads, headdim)
cu_seqlens: (batch + 1,) cumulative sequence lengths
max_seqlen: int max seq length in the batch
'''
pass
def extra_repr(self) -> str:
pass
| 4
| 3
| 15
| 1
| 10
| 4
| 2
| 0.45
| 1
| 5
| 0
| 0
| 3
| 3
| 3
| 3
| 51
| 6
| 31
| 19
| 15
| 14
| 13
| 5
| 9
| 2
| 1
| 1
| 5
|
3,964
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ApplyRotaryEmbUnpad
|
from typing import Literal, Optional, Union
import torch
import torch.nn.functional as F
class ApplyRotaryEmbUnpad(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cos, sin, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None):
qkv = qkv.contiguous()
total_nnz, _three, _nheads, headdim = qkv.shape
qk = qkv[:, :2].view(total_nnz, -1, headdim)
apply_rotary(qk, cos, sin, seqlen_offsets=0, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, interleaved=False, inplace=True)
ctx.save_for_backward(cos, sin, cu_seqlens)
ctx.max_seqlen = max_seqlen
return qkv
@staticmethod
def backward(ctx, do):
cos, sin, cu_seqlens = ctx.saved_tensors
do = do.contiguous()
total_nnz, _three, _nheads, headdim = do.shape
dqk = do[:, :2].view(total_nnz, -1, headdim)
apply_rotary(dqk, cos, sin, seqlen_offsets=0, cu_seqlens=cu_seqlens, max_seqlen=ctx.max_seqlen, interleaved=False, inplace=True, conjugate=True)
return (do, None, None, None, None, None, None)
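A minimal `torch.autograd.Function` following the same `save_for_backward` / ctx-attribute pattern as `ApplyRotaryEmbUnpad`; the elementwise gating below is a stand-in operation, not the rotary kernel.
```python
import torch

class ScaleByGate(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, gate):
        ctx.save_for_backward(x, gate)  # stash tensors needed in backward
        return x * gate

    @staticmethod
    def backward(ctx, grad_out):
        x, gate = ctx.saved_tensors
        # one gradient slot per forward input
        return grad_out * gate, grad_out * x

x = torch.randn(3, requires_grad=True)
gate = torch.randn(3, requires_grad=True)
ScaleByGate.apply(x, gate).sum().backward()
print(x.grad, gate.grad)
```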
|
class ApplyRotaryEmbUnpad(torch.autograd.Function):
@staticmethod
def forward(ctx, qkv, cos, sin, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None):
pass
@staticmethod
def backward(ctx, do):
pass
| 5
| 0
| 25
| 1
| 21
| 3
| 1
| 0.14
| 1
| 2
| 0
| 0
| 0
| 0
| 2
| 32
| 53
| 3
| 44
| 17
| 32
| 6
| 16
| 8
| 13
| 1
| 5
| 0
| 2
|
3,965
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertAttention
|
from typing import Literal, Optional, Union
from torch import nn
import torch.nn.functional as F
import copy
import torch
class ModernBertAttention(nn.Module):
"""Performs multi-headed self attention on a batch of unpadded sequences.
If Flash Attention 2 is installed, this module uses Flash Attention to improve throughput.
If Flash Attention 2 is not installed, the implementation will use PyTorch's SDPA kernel,
which requires padding and unpadding inputs, adding some overhead.
See `forward` method for additional details.
"""
def __init__(self, config: ModernBertConfig, layer_id: Optional[int]=None):
super().__init__()
self.config = config
self.layer_id = layer_id
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.attention_dropout = config.attention_dropout
self.deterministic_flash_attn = config.deterministic_flash_attn
self.num_heads = config.num_attention_heads
self.head_dim = config.hidden_size // config.num_attention_heads
self.all_head_size = self.head_dim * self.num_heads
self.Wqkv = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=config.attention_bias)
if layer_id % config.global_attn_every_n_layers != 0:
self.local_attention = (config.local_attention // 2, config.local_attention // 2)
rope_theta = config.local_rope_theta if config.local_rope_theta is not None else config.global_rope_theta
max_position_embeddings = config.local_attention
else:
self.local_attention = (-1, -1)
max_position_embeddings = config.max_position_embeddings
rope_theta = config.global_rope_theta
if config._attn_implementation == 'flash_attention_2':
self.rotary_emb = ModernBertUnpaddedRotaryEmbedding(dim=self.head_dim, max_seqlen=max_position_embeddings, base=rope_theta)
else:
config_copy = copy.deepcopy(config)
config_copy.rope_theta = rope_theta
self.rotary_emb = ModernBertRotaryEmbedding(config=config_copy)
self.Wo = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias)
self.out_drop = nn.Dropout(config.attention_dropout) if config.attention_dropout > 0.0 else nn.Identity()
self.pruned_heads = set()
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False, **kwargs) -> torch.Tensor:
qkv = self.Wqkv(hidden_states)
bs = hidden_states.shape[0]
if self.config._attn_implementation == 'flash_attention_2':
qkv = qkv.view(-1, 3, self.num_heads, self.head_dim)
else:
qkv = qkv.view(bs, -1, 3, self.num_heads, self.head_dim)
attn_outputs = MODERNBERT_ATTENTION_FUNCTION[self.config._attn_implementation](self, qkv=qkv, rotary_emb=self.rotary_emb, local_attention=self.local_attention, bs=bs, dim=self.all_head_size, output_attentions=output_attentions, **kwargs)
hidden_states = attn_outputs[0]
hidden_states = self.out_drop(self.Wo(hidden_states))
return (hidden_states,) + attn_outputs[1:]
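A small sketch of the global/local alternation rule in `__init__` above: a layer gets global attention only when its id is a multiple of `global_attn_every_n_layers`, otherwise a symmetric local window of `local_attention // 2` tokens per side. The layer count is illustrative.
```python
global_attn_every_n_layers, local_attention = 3, 128
for layer_id in range(8):
    if layer_id % global_attn_every_n_layers == 0:
        window = (-1, -1)  # (-1, -1) means unrestricted (global) attention
    else:
        window = (local_attention // 2, local_attention // 2)
    print(layer_id, window)
```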
|
class ModernBertAttention(nn.Module):
'''Performs multi-headed self attention on a batch of unpadded sequences.
If Flash Attention 2 is installed, this module uses Flash Attention to improve throughput.
If Flash Attention 2 is not installed, the implementation will use PyTorch's SDPA kernel,
which requires padding and unpadding inputs, adding some overhead.
See `forward` method for additional details.
'''
def __init__(self, config: ModernBertConfig, layer_id: Optional[int]=None):
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False, **kwargs) -> torch.Tensor:
pass
| 3
| 1
| 34
| 5
| 29
| 1
| 5
| 0.12
| 1
| 9
| 3
| 0
| 2
| 13
| 2
| 12
| 78
| 13
| 59
| 26
| 51
| 7
| 38
| 21
| 35
| 7
| 1
| 2
| 9
|
3,966
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertConfig
|
from typing import Literal, Optional, Union
from ...configuration_utils import PretrainedConfig
class ModernBertConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`ModernBertModel`]. It is used to instantiate a ModernBert
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ModernBERT-base.
e.g. [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50368):
Vocabulary size of the ModernBert model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`ModernBertModel`]
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1152):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 22):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. Will default to `"gelu"`
if not specified.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_cutoff_factor (`float`, *optional*, defaults to 2.0):
The cutoff factor for the truncated_normal_initializer for initializing all weight matrices.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
norm_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the normalization layers.
pad_token_id (`int`, *optional*, defaults to 50283):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 50282):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 50281):
Beginning of stream token id.
cls_token_id (`int`, *optional*, defaults to 50281):
Classification token id.
sep_token_id (`int`, *optional*, defaults to 50282):
Separation token id.
global_rope_theta (`float`, *optional*, defaults to 160000.0):
The base period of the global RoPE embeddings.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
global_attn_every_n_layers (`int`, *optional*, defaults to 3):
The number of layers between global attention layers.
local_attention (`int`, *optional*, defaults to 128):
The window size for local attention.
local_rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the local RoPE embeddings.
embedding_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the embeddings.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the MLP layers.
mlp_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the MLP layers.
decoder_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the decoder layers.
classifier_pooling (`str`, *optional*, defaults to `"cls"`):
The pooling method for the classifier. Should be either `"cls"` or `"mean"`. In local attention layers, the
CLS token doesn't attend to all tokens on long sequences.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the classifier.
classifier_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the classifier.
classifier_activation (`str`, *optional*, defaults to `"gelu"`):
The activation function for the classifier.
deterministic_flash_attn (`bool`, *optional*, defaults to `False`):
Whether to use deterministic flash attention. If `False`, inference will be faster but not deterministic.
sparse_prediction (`bool`, *optional*, defaults to `False`):
Whether to use sparse prediction for the masked language model instead of returning the full dense logits.
sparse_pred_ignore_index (`int`, *optional*, defaults to -100):
The index to ignore for the sparse prediction.
reference_compile (`bool`, *optional*):
Whether to compile the layers of the model which were compiled during pretraining. If `None`, then parts of
the model will be compiled if 1) `triton` is installed, 2) the model is not on MPS, 3) the model is not
shared between devices, and 4) the model is not resized after initialization. If `True`, then the model may
be faster in some scenarios.
repad_logits_with_grad (`bool`, *optional*, defaults to `False`):
When True, ModernBertForMaskedLM keeps track of the logits' gradient when repadding for output. This only
applies when using Flash Attention 2 with passed labels. Otherwise output logits always have a gradient.
Examples:
```python
>>> from transformers import ModernBertModel, ModernBertConfig
>>> # Initializing a ModernBert style configuration
>>> configuration = ModernBertConfig()
>>> # Initializing a model from the modernbert-base style configuration
>>> model = ModernBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'modernbert'
attribute_map = {'rope_theta': 'global_rope_theta'}
keys_to_ignore_at_inference = ['past_key_values']
def __init__(self, vocab_size=50368, hidden_size=768, intermediate_size=1152, num_hidden_layers=22, num_attention_heads=12, hidden_activation='gelu', max_position_embeddings=8192, initializer_range=0.02, initializer_cutoff_factor=2.0, norm_eps=1e-05, norm_bias=False, pad_token_id=50283, eos_token_id=50282, bos_token_id=50281, cls_token_id=50281, sep_token_id=50282, global_rope_theta=160000.0, attention_bias=False, attention_dropout=0.0, global_attn_every_n_layers=3, local_attention=128, local_rope_theta=10000.0, embedding_dropout=0.0, mlp_bias=False, mlp_dropout=0.0, decoder_bias=True, classifier_pooling: Literal['cls', 'mean']='cls', classifier_dropout=0.0, classifier_bias=False, classifier_activation='gelu', deterministic_flash_attn=False, sparse_prediction=False, sparse_pred_ignore_index=-100, reference_compile=None, repad_logits_with_grad=False, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, cls_token_id=cls_token_id, sep_token_id=sep_token_id, **kwargs)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.initializer_range = initializer_range
self.initializer_cutoff_factor = initializer_cutoff_factor
self.norm_eps = norm_eps
self.norm_bias = norm_bias
self.global_rope_theta = global_rope_theta
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.hidden_activation = hidden_activation
self.global_attn_every_n_layers = global_attn_every_n_layers
self.local_attention = local_attention
self.local_rope_theta = local_rope_theta
self.embedding_dropout = embedding_dropout
self.mlp_bias = mlp_bias
self.mlp_dropout = mlp_dropout
self.decoder_bias = decoder_bias
self.classifier_pooling = classifier_pooling
self.classifier_dropout = classifier_dropout
self.classifier_bias = classifier_bias
self.classifier_activation = classifier_activation
self.deterministic_flash_attn = deterministic_flash_attn
self.sparse_prediction = sparse_prediction
self.sparse_pred_ignore_index = sparse_pred_ignore_index
self.reference_compile = reference_compile
self.repad_logits_with_grad = repad_logits_with_grad
if self.classifier_pooling not in ['cls', 'mean']:
raise ValueError(f'Invalid value for `classifier_pooling`, should be either "cls" or "mean", but is {self.classifier_pooling}.')
def to_dict(self):
output = super().to_dict()
output.pop('reference_compile', None)
return output
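The constructor validates `classifier_pooling` at the end of `__init__`; a short sketch of both the accepted and rejected paths.
```python
from transformers import ModernBertConfig

config = ModernBertConfig(classifier_pooling="mean")  # accepted
try:
    ModernBertConfig(classifier_pooling="max")        # raises ValueError
except ValueError as err:
    print(err)
```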
|
class ModernBertConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`ModernBertModel`]. It is used to instantiate a ModernBert
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ModernBERT-base.
e.g. [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50368):
Vocabulary size of the ModernBert model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`ModernBertModel`]
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1152):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 22):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. Will default to `"gelu"`
if not specified.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_cutoff_factor (`float`, *optional*, defaults to 2.0):
The cutoff factor for the truncated_normal_initializer for initializing all weight matrices.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
norm_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the normalization layers.
pad_token_id (`int`, *optional*, defaults to 50283):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 50282):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 50281):
Beginning of stream token id.
cls_token_id (`int`, *optional*, defaults to 50281):
Classification token id.
sep_token_id (`int`, *optional*, defaults to 50282):
Separation token id.
global_rope_theta (`float`, *optional*, defaults to 160000.0):
The base period of the global RoPE embeddings.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
global_attn_every_n_layers (`int`, *optional*, defaults to 3):
The number of layers between global attention layers.
local_attention (`int`, *optional*, defaults to 128):
The window size for local attention.
local_rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the local RoPE embeddings.
embedding_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the embeddings.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the MLP layers.
mlp_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the MLP layers.
decoder_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the decoder layers.
classifier_pooling (`str`, *optional*, defaults to `"cls"`):
The pooling method for the classifier. Should be either `"cls"` or `"mean"`. In local attention layers, the
CLS token doesn't attend to all tokens on long sequences.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the classifier.
classifier_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the classifier.
classifier_activation (`str`, *optional*, defaults to `"gelu"`):
The activation function for the classifier.
deterministic_flash_attn (`bool`, *optional*, defaults to `False`):
Whether to use deterministic flash attention. If `False`, inference will be faster but not deterministic.
sparse_prediction (`bool`, *optional*, defaults to `False`):
Whether to use sparse prediction for the masked language model instead of returning the full dense logits.
sparse_pred_ignore_index (`int`, *optional*, defaults to -100):
The index to ignore for the sparse prediction.
reference_compile (`bool`, *optional*):
Whether to compile the layers of the model which were compiled during pretraining. If `None`, then parts of
the model will be compiled if 1) `triton` is installed, 2) the model is not on MPS, 3) the model is not
shared between devices, and 4) the model is not resized after initialization. If `True`, then the model may
be faster in some scenarios.
repad_logits_with_grad (`bool`, *optional*, defaults to `False`):
When True, ModernBertForMaskedLM keeps track of the logits' gradient when repadding for output. This only
applies when using Flash Attention 2 with passed labels. Otherwise output logits always have a gradient.
Examples:
```python
>>> from transformers import ModernBertModel, ModernBertConfig
>>> # Initializing a ModernBert style configuration
>>> configuration = ModernBertConfig()
>>> # Initializing a model from the modernbert-base style configuration
>>> model = ModernBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50368, hidden_size=768, intermediate_size=1152, num_hidden_layers=22, num_attention_heads=12, hidden_activation='gelu', max_position_embeddings=8192, initializer_range=0.02, initializer_cutoff_factor=2.0, norm_eps=1e-05, norm_bias=False, pad_token_id=50283, eos_token_id=50282, bos_token_id=50281, cls_token_id=50281, sep_token_id=50282, global_rope_theta=160000.0, attention_bias=False, attention_dropout=0.0, global_attn_every_n_layers=3, local_attention=128, local_rope_theta=10000.0, embedding_dropout=0.0, mlp_bias=False, mlp_dropout=0.0, decoder_bias=True, classifier_pooling: Literal['cls', 'mean']='cls', classifier_dropout=0.0, classifier_bias=False, classifier_activation='gelu', deterministic_flash_attn=False, sparse_prediction=False, sparse_pred_ignore_index=-100, reference_compile=None, repad_logits_with_grad=False, **kwargs):
pass
def to_dict(self):
pass
| 3
| 1
| 82
| 1
| 81
| 0
| 2
| 1.13
| 1
| 2
| 0
| 0
| 1
| 30
| 1
| 33
| 189
| 10
| 84
| 72
| 44
| 95
| 37
| 34
| 35
| 2
| 2
| 1
| 2
|
3,967
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertEmbeddings
|
from typing import Literal, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
class ModernBertEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config: ModernBertConfig):
super().__init__()
self.config = config
self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
self.drop = nn.Dropout(config.embedding_dropout)
@torch.compile(dynamic=True)
def compiled_embeddings(self, input_ids: torch.LongTensor) -> torch.Tensor:
return self.drop(self.norm(self.tok_embeddings(input_ids)))
def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.Tensor]=None) -> torch.Tensor:
if inputs_embeds is not None:
hidden_states = self.drop(self.norm(inputs_embeds))
else:
hidden_states = self.compiled_embeddings(input_ids) if self.config.reference_compile else self.drop(self.norm(self.tok_embeddings(input_ids)))
return hidden_states
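A minimal sketch of the decorated-method compile pattern used above: one path is wrapped in `torch.compile(dynamic=True)` while an eager fallback stays available. The module below is a toy stand-in, not ModernBertEmbeddings.
```python
import torch
from torch import nn

class TinyEmbeddings(nn.Module):
    def __init__(self):
        super().__init__()
        self.tok_embeddings = nn.Embedding(100, 16)
        self.norm = nn.LayerNorm(16)

    @torch.compile(dynamic=True)
    def compiled_embeddings(self, input_ids):
        return self.norm(self.tok_embeddings(input_ids))

    def forward(self, input_ids, reference_compile=False):
        if reference_compile:
            return self.compiled_embeddings(input_ids)  # compiled path
        return self.norm(self.tok_embeddings(input_ids))  # eager fallback
```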
|
class ModernBertEmbeddings(nn.Module):
'''
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
'''
def __init__(self, config: ModernBertConfig):
pass
@torch.compile(dynamic=True)
def compiled_embeddings(self, input_ids: torch.LongTensor) -> torch.Tensor:
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 5
| 1
| 7
| 0
| 7
| 0
| 2
| 0.14
| 1
| 3
| 1
| 0
| 3
| 4
| 3
| 13
| 28
| 3
| 22
| 12
| 15
| 3
| 14
| 9
| 10
| 3
| 1
| 1
| 5
|
3,968
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertEncoderLayer
|
import torch
from typing import Literal, Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
from torch import nn
import torch.nn.functional as F
class ModernBertEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: ModernBertConfig, layer_id: Optional[int]=None):
super().__init__()
self.config = config
if layer_id == 0:
self.attn_norm = nn.Identity()
else:
self.attn_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
self.attn = ModernBertAttention(config=config, layer_id=layer_id)
self.mlp_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
self.mlp = ModernBertMLP(config)
@torch.compile(dynamic=True)
def compiled_mlp(self, hidden_states: torch.Tensor) -> torch.Tensor:
return self.mlp(self.mlp_norm(hidden_states))
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, output_attentions: Optional[bool]=False) -> torch.Tensor:
attn_outputs = self.attn(self.attn_norm(hidden_states), attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, output_attentions=output_attentions)
hidden_states = hidden_states + attn_outputs[0]
mlp_output = self.compiled_mlp(hidden_states) if self.config.reference_compile else self.mlp(self.mlp_norm(hidden_states))
hidden_states = hidden_states + mlp_output
return (hidden_states,) + attn_outputs[1:]
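A shape-level sketch of the pre-norm residual structure in `forward` above (layer 0 uses `nn.Identity` as `attn_norm` because the embeddings are already normalized); the linear layers below are stand-ins for the real attention and MLP blocks.
```python
import torch
from torch import nn

hidden = torch.randn(2, 4, 8)              # (batch, seq, hidden)
attn_norm, mlp_norm = nn.LayerNorm(8), nn.LayerNorm(8)
attn = nn.Linear(8, 8)                     # stand-in for ModernBertAttention
mlp = nn.Linear(8, 8)                      # stand-in for ModernBertMLP

hidden = hidden + attn(attn_norm(hidden))  # attention residual
hidden = hidden + mlp(mlp_norm(hidden))    # MLP residual
```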
|
class ModernBertEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: ModernBertConfig, layer_id: Optional[int]=None):
pass
@torch.compile(dynamic=True)
def compiled_mlp(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, output_attentions: Optional[bool]=False) -> torch.Tensor:
pass
| 5
| 0
| 13
| 0
| 13
| 0
| 2
| 0.02
| 1
| 7
| 3
| 0
| 3
| 5
| 3
| 13
| 44
| 3
| 41
| 21
| 27
| 1
| 18
| 11
| 14
| 2
| 1
| 1
| 5
|
3,969
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertForMaskedLM
|
from contextlib import nullcontext
from torch import nn
from ...utils import auto_docstring, is_flash_attn_2_available, logging
import torch.nn.functional as F
import torch
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Literal, Optional, Union
@auto_docstring(custom_intro='\n The ModernBert Model with a decoder head on top that is used for masked language modeling.\n ')
class ModernBertForMaskedLM(ModernBertPreTrainedModel):
_tied_weights_keys = ['decoder.weight']
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.config = config
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=config.decoder_bias)
self.sparse_prediction = self.config.sparse_prediction
self.sparse_pred_ignore_index = self.config.sparse_pred_ignore_index
self.post_init()
def get_output_embeddings(self):
return self.decoder
def set_output_embeddings(self, new_embeddings: nn.Linear):
self.decoder = new_embeddings
@torch.compile(dynamic=True)
def compiled_head(self, output: torch.Tensor) -> torch.Tensor:
return self.decoder(self.head(output))
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
"""
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
self._maybe_set_compile()
if self.config._attn_implementation == 'flash_attention_2':
if indices is None and cu_seqlens is None and (max_seqlen is None):
if batch_size is None and seq_len is None:
if inputs_embeds is not None:
batch_size, seq_len = inputs_embeds.shape[:2]
else:
batch_size, seq_len = input_ids.shape[:2]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool)
if inputs_embeds is None:
with torch.no_grad():
input_ids, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input(inputs=input_ids, attention_mask=attention_mask, position_ids=position_ids, labels=labels)
else:
inputs_embeds, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input(inputs=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, labels=labels)
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = outputs[0]
if self.sparse_prediction and labels is not None:
labels = labels.view(-1)
last_hidden_state = last_hidden_state.view(labels.shape[0], -1)
mask_tokens = labels != self.sparse_pred_ignore_index
last_hidden_state = last_hidden_state[mask_tokens]
labels = labels[mask_tokens]
logits = self.compiled_head(last_hidden_state) if self.config.reference_compile else self.decoder(self.head(last_hidden_state))
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs)
if self.config._attn_implementation == 'flash_attention_2':
with nullcontext() if self.config.repad_logits_with_grad or labels is None else torch.no_grad():
logits = _pad_modernbert_output(inputs=logits, indices=indices, batch=batch_size, seqlen=seq_len)
if not return_dict:
output = (logits,)
return (loss,) + output if loss is not None else output
return MaskedLMOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
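A toy illustration of the sparse-prediction path above: when `sparse_prediction` is enabled, only positions whose label differs from `sparse_pred_ignore_index` are pushed through the decoder head. The tensors below are made up.
```python
import torch

last_hidden_state = torch.randn(6, 8)      # flattened (tokens, hidden_size)
labels = torch.tensor([-100, 4, -100, -100, 7, -100])
mask_tokens = labels != -100               # sparse_pred_ignore_index
last_hidden_state = last_hidden_state[mask_tokens]  # (2, 8): masked positions only
labels = labels[mask_tokens]               # tensor([4, 7])
```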
|
@auto_docstring(custom_intro='\n The ModernBert Model with a decoder head on top that is used for masked language modeling.\n ')
class ModernBertForMaskedLM(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings: nn.Linear):
pass
@torch.compile(dynamic=True)
def compiled_head(self, output: torch.Tensor) -> torch.Tensor:
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
'''
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
'''
pass
| 9
| 1
| 22
| 3
| 19
| 1
| 4
| 0.03
| 1
| 9
| 4
| 0
| 5
| 6
| 5
| 138
| 126
| 18
| 105
| 39
| 75
| 3
| 52
| 20
| 46
| 16
| 3
| 4
| 20
|
3,970
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertForSequenceClassification
|
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Literal, Optional, Union
from ...utils import auto_docstring, is_flash_attn_2_available, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
import torch.nn.functional as F
import torch
@auto_docstring(custom_intro='\n The ModernBert Model with a sequence classification head on top that performs pooling.\n ')
class ModernBertForSequenceClassification(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
self.drop = torch.nn.Dropout(config.classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
"""
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
self._maybe_set_compile()
if input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
if batch_size is None and seq_len is None:
if inputs_embeds is not None:
batch_size, seq_len = inputs_embeds.shape[:2]
else:
batch_size, seq_len = input_ids.shape[:2]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool)
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = outputs[0]
if self.config.classifier_pooling == 'cls':
last_hidden_state = last_hidden_state[:, 0]
elif self.config.classifier_pooling == 'mean':
last_hidden_state = (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum(dim=1, keepdim=True)
pooled_output = self.head(last_hidden_state)
pooled_output = self.drop(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,)
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
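A toy sketch of the two `classifier_pooling` branches above ("cls" takes the first token, "mean" averages over non-padding positions). The tensor shapes are illustrative assumptions.
```python
import torch

hidden = torch.randn(2, 5, 8)                                    # (batch, seq, hidden)
mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]).float()  # attention mask

cls_pooled = hidden[:, 0]                                        # "cls" pooling
mean_pooled = (hidden * mask.unsqueeze(-1)).sum(dim=1) / mask.sum(dim=1, keepdim=True)
```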
|
@auto_docstring(custom_intro='\n The ModernBert Model with a sequence classification head on top that performs pooling.\n ')
class ModernBertForSequenceClassification(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
'''
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
'''
pass
| 5
| 1
| 50
| 5
| 42
| 4
| 8
| 0.08
| 1
| 8
| 4
| 0
| 2
| 6
| 2
| 135
| 108
| 10
| 91
| 34
| 65
| 7
| 41
| 16
| 38
| 14
| 3
| 3
| 15
|
3,971
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertForTokenClassification
|
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Literal, Optional, Union
from ...utils import auto_docstring, is_flash_attn_2_available, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
import torch.nn.functional as F
import torch
@auto_docstring(custom_intro='\n The ModernBert Model with a token classification head on top, e.g. for Named Entity Recognition (NER) tasks.\n ')
class ModernBertForTokenClassification(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.model = ModernBertModel(config)
self.head = ModernBertPredictionHead(config)
self.drop = torch.nn.Dropout(config.classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
"""
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
self._maybe_set_compile()
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = outputs[0]
last_hidden_state = self.head(last_hidden_state)
last_hidden_state = self.drop(last_hidden_state)
logits = self.classifier(last_hidden_state)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
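One detail worth making explicit: the token-classification loss above relies on `CrossEntropyLoss`'s default `ignore_index` of -100, so label positions set to -100 (typically padding or special tokens) contribute nothing to the loss. A minimal sketch with assumed shapes:
```python
import torch
from torch.nn import CrossEntropyLoss

num_labels = 3
logits = torch.randn(2, 4, num_labels)                     # (batch, seq, num_labels)
labels = torch.tensor([[0, 2, -100, -100], [1, 1, 0, 2]])  # -100 marks ignored tokens
loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
```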
|
@auto_docstring(custom_intro='\n The ModernBert Model with a token classification head on top, e.g. for Named Entity Recognition (NER) tasks.\n ')
class ModernBertForTokenClassification(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
'''
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
'''
pass
| 5
| 1
| 36
| 4
| 30
| 3
| 3
| 0.08
| 1
| 8
| 4
| 0
| 2
| 5
| 2
| 135
| 79
| 8
| 66
| 31
| 41
| 5
| 25
| 14
| 22
| 5
| 3
| 1
| 6
|
3,972
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertMLP
|
from ...activations import ACT2FN
import torch
import torch.nn.functional as F
from torch import nn
class ModernBertMLP(nn.Module):
"""Applies the GLU at the end of each ModernBERT layer.
Compared to the default BERT architecture, this block replaces :class:`~transformers.models.bert.modeling_bert.BertIntermediate`
and :class:`~transformers.models.bert.modeling_bert.BertSelfOutput` with a single module that has similar functionality.
"""
def __init__(self, config: ModernBertConfig):
super().__init__()
self.config = config
self.Wi = nn.Linear(config.hidden_size, int(config.intermediate_size) * 2, bias=config.mlp_bias)
self.act = ACT2FN[config.hidden_activation]
self.drop = nn.Dropout(config.mlp_dropout)
self.Wo = nn.Linear(config.intermediate_size, config.hidden_size, bias=config.mlp_bias)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
input, gate = self.Wi(hidden_states).chunk(2, dim=-1)
return self.Wo(self.drop(self.act(input) * gate))
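A toy sketch of the gated-linear-unit split performed by `Wi` above: a single matmul produces both the value and gate halves, which are recombined elementwise before `Wo` projects back to the hidden size. GELU stands in for `config.hidden_activation` here, an assumed default.
```python
import torch
import torch.nn.functional as F

wi_out = torch.randn(1, 4, 2 * 16)        # Wi output: (..., 2 * intermediate_size)
inp, gate = wi_out.chunk(2, dim=-1)       # split into value and gate halves
mlp_inner = F.gelu(inp) * gate            # what Wo would then project back down
```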
|
class ModernBertMLP(nn.Module):
'''Applies the GLU at the end of each ModernBERT layer.
Compared to the default BERT architecture, this block replaces :class:`~transformers.models.bert.modeling_bert.BertIntermediate`
and :class:`~transformers.models.bert.modeling_bert.BertSelfOutput` with a single module that has similar functionality.
'''
def __init__(self, config: ModernBertConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 5
| 0
| 5
| 0
| 1
| 0.36
| 1
| 4
| 1
| 0
| 2
| 5
| 2
| 12
| 18
| 3
| 11
| 9
| 8
| 4
| 11
| 9
| 8
| 1
| 1
| 0
| 2
|
3,973
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertModel
|
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Literal, Optional, Union
from ...utils import auto_docstring, is_flash_attn_2_available, logging
from torch import nn
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
import torch.nn.functional as F
import torch
@auto_docstring
class ModernBertModel(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
super().__init__(config)
self.config = config
self.embeddings = ModernBertEmbeddings(config)
self.layers = nn.ModuleList([ModernBertEncoderLayer(config, layer_id) for layer_id in range(config.num_hidden_layers)])
self.final_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
self.gradient_checkpointing = False
self.post_init()
def get_input_embeddings(self):
return self.embeddings.tok_embeddings
def set_input_embeddings(self, value):
self.embeddings.tok_embeddings = value
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor, ...], BaseModelOutput]:
"""
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
self._maybe_set_compile()
if input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
if batch_size is None and seq_len is None:
if inputs_embeds is not None:
batch_size, seq_len = inputs_embeds.shape[:2]
else:
batch_size, seq_len = input_ids.shape[:2]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool)
repad = False
if self.config._attn_implementation == 'flash_attention_2':
if indices is None and cu_seqlens is None and (max_seqlen is None):
repad = True
if inputs_embeds is None:
with torch.no_grad():
input_ids, indices, cu_seqlens, max_seqlen, *_ = _unpad_modernbert_input(inputs=input_ids, attention_mask=attention_mask)
else:
inputs_embeds, indices, cu_seqlens, max_seqlen, *_ = _unpad_modernbert_input(inputs=inputs_embeds, attention_mask=attention_mask)
else:
if position_ids is None:
position_ids = torch.arange(seq_len, device=device).unsqueeze(0)
attention_mask, sliding_window_mask = self._update_attention_mask(attention_mask, output_attentions=output_attentions)
hidden_states = self.embeddings(input_ids=input_ids, inputs_embeds=inputs_embeds)
for encoder_layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = encoder_layer(hidden_states, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions and len(layer_outputs) > 1:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states = self.final_norm(hidden_states)
if repad:
hidden_states = _pad_modernbert_output(inputs=hidden_states, indices=indices, batch=batch_size, seqlen=seq_len)
if all_hidden_states is not None:
all_hidden_states = tuple((_pad_modernbert_output(inputs=hs, indices=indices, batch=batch_size, seqlen=seq_len) for hs in all_hidden_states))
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
def _update_attention_mask(self, attention_mask: torch.Tensor, output_attentions: bool) -> tuple[torch.Tensor, torch.Tensor]:
if output_attentions:
if self.config._attn_implementation == 'sdpa':
logger.warning_once('Outputting attentions is only supported with the \'eager\' attention implementation, not with "sdpa". Falling back to `attn_implementation="eager"`.')
self.config._attn_implementation = 'eager'
elif self.config._attn_implementation != 'eager':
logger.warning_once(f'Outputting attentions is only supported with the eager attention implementation, not with {self.config._attn_implementation}. Consider setting `attn_implementation="eager"`. Setting `output_attentions=False`.')
global_attention_mask = _prepare_4d_attention_mask(attention_mask, self.dtype)
rows = torch.arange(global_attention_mask.shape[2]).unsqueeze(0)
distance = torch.abs(rows - rows.T)
window_mask = (distance <= self.config.local_attention // 2).unsqueeze(0).unsqueeze(0).to(attention_mask.device)
sliding_window_mask = global_attention_mask.masked_fill(window_mask.logical_not(), torch.finfo(self.dtype).min)
return (global_attention_mask, sliding_window_mask)
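A standalone sketch of the band mask built in `_update_attention_mask`: positions farther than `local_attention // 2` from the diagonal are masked out for the local-attention layers. The values below are toy assumptions, not model defaults.
```python
import torch

seq_len, local_attention = 8, 4
rows = torch.arange(seq_len).unsqueeze(0)
distance = torch.abs(rows - rows.T)
window_mask = distance <= local_attention // 2  # True inside the sliding window
```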
|
@auto_docstring
class ModernBertModel(ModernBertPreTrainedModel):
def __init__(self, config: ModernBertConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, sliding_window_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.Tensor]=None, indices: Optional[torch.Tensor]=None, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None, batch_size: Optional[int]=None, seq_len: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor, ...], BaseModelOutput]:
'''
sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers
perform global attention, while the rest perform local attention. This mask is used to avoid attending to
far-away tokens in the local attention layers when not using Flash Attention.
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*):
Indices of the non-padding tokens in the input sequence. Used for unpadding the output.
cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*):
Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors.
max_seqlen (`int`, *optional*):
Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors.
batch_size (`int`, *optional*):
Batch size of the input sequences. Used to pad the output tensors.
seq_len (`int`, *optional*):
Sequence length of the input sequences including padding tokens. Used to pad the output tensors.
'''
pass
def _update_attention_mask(self, attention_mask: torch.Tensor, output_attentions: bool) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 8
| 1
| 32
| 4
| 28
| 1
| 6
| 0.03
| 1
| 11
| 4
| 0
| 5
| 5
| 5
| 138
| 172
| 23
| 145
| 39
| 118
| 4
| 74
| 23
| 68
| 24
| 3
| 4
| 31
|
3,974
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertPreTrainedModel
|
from ...utils.import_utils import is_triton_available
from typing import Literal, Optional, Union
from ...utils import auto_docstring, is_flash_attn_2_available, logging
from ...modeling_utils import PreTrainedModel
from torch import nn
import math
@auto_docstring
class ModernBertPreTrainedModel(PreTrainedModel):
config: ModernBertConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['ModernBertEmbeddings', 'ModernBertEncoderLayer']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = False
def _init_weights(self, module: nn.Module):
cutoff_factor = self.config.initializer_cutoff_factor
if cutoff_factor is None:
cutoff_factor = 3
def init_weight(module: nn.Module, std: float):
nn.init.trunc_normal_(module.weight, mean=0.0, std=std, a=-cutoff_factor * std, b=cutoff_factor * std)
if isinstance(module, nn.Linear):
if module.bias is not None:
nn.init.zeros_(module.bias)
stds = {'in': self.config.initializer_range, 'out': self.config.initializer_range / math.sqrt(2.0 * self.config.num_hidden_layers), 'embedding': self.config.initializer_range, 'final_out': self.config.hidden_size ** (-0.5)}
if isinstance(module, ModernBertEmbeddings):
init_weight(module.tok_embeddings, stds['embedding'])
elif isinstance(module, ModernBertMLP):
init_weight(module.Wi, stds['in'])
init_weight(module.Wo, stds['out'])
elif isinstance(module, ModernBertAttention):
init_weight(module.Wqkv, stds['in'])
init_weight(module.Wo, stds['out'])
elif isinstance(module, ModernBertPredictionHead):
init_weight(module.dense, stds['out'])
elif isinstance(module, ModernBertForMaskedLM):
init_weight(module.decoder, stds['out'])
elif isinstance(module, (ModernBertForSequenceClassification, ModernBertForMultipleChoice, ModernBertForTokenClassification, ModernBertForQuestionAnswering)):
init_weight(module.classifier, stds['final_out'])
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
if module.bias is not None:
module.bias.data.zero_()
def _check_and_adjust_attn_implementation(self, attn_implementation: Optional[str], is_init_check: bool=False) -> str:
"""
Checks and dispatches to the requested attention implementation.
"""
try:
attn_implementation = 'flash_attention_2' if attn_implementation is None and self._flash_attn_2_can_dispatch() else attn_implementation
except (ValueError, ImportError):
pass
return super()._check_and_adjust_attn_implementation(attn_implementation=attn_implementation, is_init_check=is_init_check)
def _maybe_set_compile(self):
if self.config.reference_compile is False:
return
if hasattr(self, 'hf_device_map') and len(self.hf_device_map) > 1:
if self.config.reference_compile:
logger.warning_once('If `accelerate` split the model across devices, `torch.compile` will not work. Falling back to non-compiled mode.')
self.config.reference_compile = False
if self.device.type == 'mps':
if self.config.reference_compile:
logger.warning_once('Compiling the model with `torch.compile` and using a `torch.mps` device is not supported. Falling back to non-compiled mode.')
self.config.reference_compile = False
if self.device.type == 'cpu':
if self.config.reference_compile:
logger.warning_once('Compiling the model with `torch.compile` and using a `torch.cpu` device is not supported. Falling back to non-compiled mode.')
self.config.reference_compile = False
if self.config.reference_compile is None:
self.config.reference_compile = is_triton_available()
def resize_token_embeddings(self, *args, **kwargs):
model_embeds = super().resize_token_embeddings(*args, **kwargs)
if self.config.reference_compile in {True, None}:
if self.config.reference_compile:
logger.warning_once('Resizing token embeddings with `torch.compile` is not supported. Falling back to non-compiled mode.')
self.config.reference_compile = False
return model_embeds
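A sketch of the cutoff-bounded truncated-normal initialization that `_init_weights` applies; the std and cutoff mirror the defaults referenced above, while the layer sizes and depth are assumed values.
```python
import math
import torch
from torch import nn

std, cutoff_factor = 0.02, 3.0
layer = nn.Linear(768, 768)
nn.init.trunc_normal_(layer.weight, mean=0.0, std=std,
                      a=-cutoff_factor * std, b=cutoff_factor * std)
nn.init.zeros_(layer.bias)
# "out" projections shrink with depth, as in the stds dict above:
out_std = std / math.sqrt(2.0 * 22)  # 22 hidden layers is an assumption
```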
|
@auto_docstring
class ModernBertPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
pass
def init_weight(module: nn.Module, std: float):
pass
def _check_and_adjust_attn_implementation(self, attn_implementation: Optional[str], is_init_check: bool=False) -> str:
'''
Checks and dispatches to the requested attention implementation.
'''
pass
def _maybe_set_compile(self):
pass
def resize_token_embeddings(self, *args, **kwargs):
pass
| 7
| 1
| 25
| 2
| 22
| 1
| 5
| 0.04
| 1
| 14
| 7
| 4
| 3
| 0
| 4
| 133
| 124
| 14
| 106
| 24
| 92
| 4
| 59
| 16
| 53
| 9
| 2
| 2
| 26
|
3,975
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertPredictionHead
|
import torch
from ...activations import ACT2FN
import torch.nn.functional as F
from torch import nn
class ModernBertPredictionHead(nn.Module):
def __init__(self, config: ModernBertConfig):
super().__init__()
self.config = config
self.dense = nn.Linear(config.hidden_size, config.hidden_size, config.classifier_bias)
self.act = ACT2FN[config.classifier_activation]
self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return self.norm(self.act(self.dense(hidden_states)))
|
class ModernBertPredictionHead(nn.Module):
def __init__(self, config: ModernBertConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 2
| 4
| 2
| 12
| 10
| 1
| 9
| 7
| 6
| 0
| 9
| 7
| 6
| 1
| 1
| 0
| 2
|
3,976
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertRotaryEmbedding
|
from ..gemma.modeling_gemma import GemmaRotaryEmbedding, apply_rotary_pos_emb
class ModernBertRotaryEmbedding(GemmaRotaryEmbedding):
pass
|
class ModernBertRotaryEmbedding(GemmaRotaryEmbedding):
pass
| 1
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 4
| 1
| 0
| 1
| 1
| 1
| 14
| 4
| 0
| 4
| 3
| 2
| 0
| 4
| 3
| 2
| 1
| 2
| 0
| 1
|
3,977
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/modernbert/modular_modernbert.py
|
transformers.models.modernbert.modular_modernbert.ModernBertUnpaddedRotaryEmbedding
|
import torch
import torch.nn.functional as F
from typing import Literal, Optional, Union
class ModernBertUnpaddedRotaryEmbedding(RotaryEmbedding):
"""
The rotary position embeddings applied directly to unpadded sequences.
"""
def __init__(self, dim: int, base: float=10000.0, max_seqlen: Optional[int]=None, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None):
"""
max_seqlen: if max_seqlen, device, and dtype are provided, we precompute the cos_sin_cache
up to max_seqlen. If the max_seqlen, device, or dtype during training/inference differ,
the cos_sin_cache will be recomputed during the forward pass.
"""
super().__init__(dim=dim, base=base, device=device, interleaved=False)
self.max_seqlen = max_seqlen
if max_seqlen is not None and device is not None and (dtype is not None):
self._update_cos_sin_cache(max_seqlen, device=device, dtype=dtype)
def forward(self, qkv: torch.Tensor, cu_seqlens: torch.Tensor, max_seqlen: Optional[int]=None) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
"""
Apply rotary embedding *inplace* to qkv.
qkv: (total_nnz, 3, nheads, headdim)
cu_seqlens: (batch + 1,) cumulative sequence lengths
max_seqlen: int max seq length in the batch
"""
if max_seqlen is not None:
self._update_cos_sin_cache(max_seqlen, device=qkv.device, dtype=qkv.dtype)
qkv = apply_rotary_unpadded(qkv, self._cos_cached, self._sin_cached, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen)
return qkv
def extra_repr(self) -> str:
return f'dim={self.dim}, base={self.base}, scale_base={self.scale_base}'
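A sketch of the `cu_seqlens` convention that `forward` above relies on: an unpadded batch is a flat `(total_nnz,)` token stream, and the `(batch + 1,)` cumulative lengths delimit each sequence's span.
```python
import torch
import torch.nn.functional as F

seqlens = torch.tensor([3, 5])                 # true lengths of two sequences
cu_seqlens = F.pad(seqlens.cumsum(0), (1, 0))  # tensor([0, 3, 8])
tokens = torch.arange(int(cu_seqlens[-1]))     # flat unpadded token stream
seq0 = tokens[cu_seqlens[0]:cu_seqlens[1]]     # span belonging to sequence 0
```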
|
class ModernBertUnpaddedRotaryEmbedding(RotaryEmbedding):
'''
The rotary position embeddings applied directly to unpadded sequences.
'''
def __init__(self, dim: int, base: float=10000.0, max_seqlen: Optional[int]=None, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None):
'''
max_seqlen: if max_seqlen, device, and dtype are provided, we precompute the cos_sin_cache
up to max_seqlen. If the max_seqlen, device, or dtype during training/inference differ,
the cos_sin_cache will be recomputed during the forward pass.
'''
pass
def forward(self, qkv: torch.Tensor, cu_seqlens: torch.Tensor, max_seqlen: Optional[int]=None) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
'''
Apply rotary embedding *inplace* to qkv.
qkv: (total_nnz, 3, nheads, headdim)
cu_seqlens: (batch + 1,) cumulative sequence lengths
max_seqlen: int max seq length in the batch
'''
pass
def extra_repr(self) -> str:
pass
| 4
| 3
| 15
| 1
| 10
| 4
| 2
| 0.45
| 1
| 5
| 0
| 0
| 3
| 3
| 3
| 3
| 51
| 6
| 31
| 19
| 15
| 14
| 13
| 5
| 9
| 2
| 1
| 1
| 5
|
3,978
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/configuration_moonshine.py
|
transformers.models.moonshine.configuration_moonshine.MoonshineConfig
|
from ...modeling_rope_utils import rope_config_validation
from ...configuration_utils import PretrainedConfig
class MoonshineConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MoonshineModel`]. It is used to instantiate a Moonshine
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Moonshine
[UsefulSensors/moonshine-tiny](https://huggingface.co/UsefulSensors/moonshine-tiny).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32768):
Vocabulary size of the Moonshine model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MoonshineModel`].
hidden_size (`int`, *optional*, defaults to 288):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1152):
Dimension of the MLP representations.
encoder_num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
decoder_num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer decoder.
encoder_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`encoder_num_key_value_heads=encoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
`encoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, it will default to
`num_attention_heads`.
decoder_num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`decoder_num_key_value_heads=decoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
`decoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, it will default to
`decoder_num_attention_heads`.
pad_head_dim_to_multiple_of (`int`, *optional*):
Pad head dimension in encoder and decoder to the next multiple of this value. Necessary for using certain
optimized attention implementations.
encoder_hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder.
decoder_hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
decoder_start_token_id (`int`, *optional*, defaults to 1):
Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids`
are provided to the `generate` function. It is used to guide the model's generation process depending on
the task.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend updating this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
partial_rotary_factor (`float`, *optional*, defaults to 0.9):
Percentage of the query and keys which will have rotary embedding.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
bos_token_id (`int`, *optional*, defaults to 1):
Denotes beginning of sequences token id.
eos_token_id (`int`, *optional*, defaults to 2):
Denotes end of sequences token id.
Example:
```python
>>> from transformers import MoonshineModel, MoonshineConfig
>>> # Initializing a Moonshine style configuration
>>> configuration = MoonshineConfig.from_pretrained("UsefulSensors/moonshine-tiny")
>>> # Initializing a model from the configuration
>>> model = MoonshineModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'moonshine'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_key_value_heads': 'encoder_num_key_value_heads', 'num_attention_heads': 'encoder_num_attention_heads', 'num_hidden_layers': 'encoder_num_hidden_layers'}
def __init__(self, vocab_size=32768, hidden_size=288, intermediate_size=1152, encoder_num_hidden_layers=6, decoder_num_hidden_layers=6, encoder_num_attention_heads=8, decoder_num_attention_heads=8, encoder_num_key_value_heads=None, decoder_num_key_value_heads=None, pad_head_dim_to_multiple_of=None, encoder_hidden_act='gelu', decoder_hidden_act='silu', max_position_embeddings=512, initializer_range=0.02, decoder_start_token_id=1, use_cache=True, rope_theta=10000.0, rope_scaling=None, partial_rotary_factor=0.9, is_encoder_decoder=True, attention_bias=False, attention_dropout=0.0, bos_token_id=1, eos_token_id=2, **kwargs):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.encoder_num_hidden_layers = encoder_num_hidden_layers
self.decoder_num_hidden_layers = decoder_num_hidden_layers
self.encoder_num_attention_heads = encoder_num_attention_heads
self.decoder_num_attention_heads = decoder_num_attention_heads
if encoder_num_key_value_heads is None:
encoder_num_key_value_heads = encoder_num_attention_heads
self.encoder_num_key_value_heads = encoder_num_key_value_heads
if decoder_num_key_value_heads is None:
decoder_num_key_value_heads = decoder_num_attention_heads
self.decoder_num_key_value_heads = decoder_num_key_value_heads
self.pad_head_dim_to_multiple_of = pad_head_dim_to_multiple_of
self.encoder_hidden_act = encoder_hidden_act
self.decoder_hidden_act = decoder_hidden_act
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.partial_rotary_factor = partial_rotary_factor
self.is_encoder_decoder = is_encoder_decoder
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
rope_config_validation(self)
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
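A hedged sketch of the grouped-query-attention defaulting in `__init__` above: leaving the key/value head counts unset yields standard multi-head attention, while setting a smaller decoder value makes several query heads share each key/value head.
```python
from transformers import MoonshineConfig

cfg = MoonshineConfig(decoder_num_attention_heads=8, decoder_num_key_value_heads=2)
assert cfg.decoder_num_key_value_heads == 2  # 4 query heads per KV head (GQA)
assert cfg.encoder_num_key_value_heads == cfg.encoder_num_attention_heads  # MHA default
```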
|
class MoonshineConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MoonshineModel`]. It is used to instantiate a Moonshine
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Moonshine
[UsefulSensors/moonshine-tiny](https://huggingface.co/UsefulSensors/moonshine-tiny).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32768):
Vocabulary size of the Moonshine model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MoonshineModel`].
hidden_size (`int`, *optional*, defaults to 288):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1152):
Dimension of the MLP representations.
encoder_num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
decoder_num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer decoder.
encoder_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`encoder_num_key_value_heads=encoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
`encoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, it will default to
`num_attention_heads`.
decoder_num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`decoder_num_key_value_heads=decoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
`decoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, it will default to
`decoder_num_attention_heads`.
pad_head_dim_to_multiple_of (`int`, *optional*):
Pad head dimension in encoder and decoder to the next multiple of this value. Necessary for using certain
optimized attention implementations.
encoder_hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder.
decoder_hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
decoder_start_token_id (`int`, *optional*, defaults to 1):
Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids`
are provided to the `generate` function. It is used to guide the model's generation process depending on
the task.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend updating this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
partial_rotary_factor (`float`, *optional*, defaults to 0.9):
Percentage of the query and keys which will have rotary embedding.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
bos_token_id (`int`, *optional*, defaults to 1):
Denotes beginning of sequences token id.
eos_token_id (`int`, *optional*, defaults to 2):
Denotes end of sequences token id.
Example:
```python
>>> from transformers import MoonshineModel, MoonshineConfig
>>> # Initializing a Moonshine style configuration
>>> configuration = MoonshineConfig.from_pretrained("UsefulSensors/moonshine-tiny")
>>> # Initializing a model from the configuration
>>> model = MoonshineModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=32768, hidden_size=288, intermediate_size=1152, encoder_num_hidden_layers=6, decoder_num_hidden_layers=6, encoder_num_attention_heads=8, decoder_num_attention_heads=8, encoder_num_key_value_heads=None, decoder_num_key_value_heads=None, pad_head_dim_to_multiple_of=None, encoder_hidden_act='gelu', decoder_hidden_act='silu', max_position_embeddings=512, initializer_range=0.02, decoder_start_token_id=1, use_cache=True, rope_theta=10000.0, rope_scaling=None, partial_rotary_factor=0.9, is_encoder_decoder=True, attention_bias=False, attention_dropout=0.0, bos_token_id=1, eos_token_id=2, **kwargs):
pass
| 2
| 1
| 69
| 6
| 62
| 1
| 3
| 1.69
| 1
| 1
| 0
| 0
| 1
| 22
| 1
| 1
| 203
| 15
| 70
| 54
| 41
| 118
| 33
| 27
| 31
| 3
| 1
| 1
| 3
|
3,979
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modeling_moonshine.py
|
transformers.models.moonshine.modeling_moonshine.MoonshineAttention
|
import torch.nn as nn
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from .configuration_moonshine import MoonshineConfig
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils.deprecation import deprecate_kwarg
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
class MoonshineAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: MoonshineConfig, layer_idx: int, is_causal: bool, num_attention_heads: int, num_key_value_heads: int):
super().__init__()
config.update({'num_attention_heads': num_attention_heads, 'num_key_value_heads': num_key_value_heads})
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.attention_dropout = config.attention_dropout
self.is_causal = is_causal
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
if self.config.pad_head_dim_to_multiple_of is not None:
target_multiple = self.config.pad_head_dim_to_multiple_of
target_head_dim = target_multiple * ((self.head_dim + target_multiple - 1) // target_multiple)
self.head_dim_padding = target_head_dim - self.head_dim
else:
self.head_dim_padding = 0
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, key_value_states: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len = hidden_states.shape[:-1]
query_states = self.q_proj(hidden_states).view(bsz, q_len, self.config.num_attention_heads, self.head_dim).transpose(1, 2)
is_cross_attention = key_value_states is not None
if past_key_values is not None:
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
past_key_values.is_updated[self.layer_idx] = True
past_key_values = past_key_values.cross_attention_cache
else:
past_key_values = past_key_values.self_attention_cache
current_states = key_value_states if key_value_states is not None else hidden_states
if is_cross_attention and past_key_values and is_updated:
key_states = past_key_values.layers[self.layer_idx].keys
value_states = past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states).view(bsz, -1, self.config.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = self.v_proj(current_states).view(bsz, -1, self.config.num_key_value_heads, self.head_dim).transpose(1, 2)
if is_cross_attention and past_key_values is not None:
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if not is_cross_attention:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
is_causal = self.is_causal and attention_mask is None and (q_len > 1)
if self.head_dim_padding > 0:
query_states = torch.nn.functional.pad(query_states, (0, self.head_dim_padding))
key_states = torch.nn.functional.pad(key_states, (0, self.head_dim_padding))
value_states = torch.nn.functional.pad(value_states, (0, self.head_dim_padding))
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, is_causal=is_causal, **kwargs)
if self.head_dim_padding > 0:
attn_output = attn_output[..., :-self.head_dim_padding]
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
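A minimal sketch of the ceiling rounding behind `head_dim_padding` above. The concrete numbers (`hidden_size=288`, 8 heads, so `head_dim=36`) follow the tiny-config defaults; the multiple of 8 is an illustrative choice, not a library default.
```python
def padded_head_dim(head_dim: int, target_multiple: int) -> int:
    # Same ceiling-division trick used in MoonshineAttention.__init__.
    return target_multiple * ((head_dim + target_multiple - 1) // target_multiple)

# hidden_size=288 with 8 heads gives head_dim=36; padding to a multiple of 8
# yields 40, i.e. head_dim_padding == 4 zero columns appended per head.
assert padded_head_dim(36, 8) == 40
```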
|
class MoonshineAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: MoonshineConfig, layer_idx: int, is_causal: bool, num_attention_heads: int, num_key_value_heads: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, key_value_states: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| 4 | 1 | 64 | 7 | 56 | 2 | 8 | 0.04 | 1 | 7 | 3 | 0 | 2 | 12 | 2 | 12 | 132 | 15 | 113 | 45 | 94 | 4 | 60 | 29 | 57 | 14 | 1 | 2 | 16
|
3,980
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modeling_moonshine.py
|
transformers.models.moonshine.modeling_moonshine.MoonshineDecoder
|
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
import torch.nn as nn
from ...masking_utils import create_causal_mask
from .configuration_moonshine import MoonshineConfig
from ...processing_utils import Unpack
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
import torch
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from transformers.utils.generic import OutputRecorder, check_model_inputs
from typing import Callable, Optional, Union
@auto_docstring
class MoonshineDecoder(MoonshinePreTrainedModel):
main_input_name = 'input_ids'
_can_record_outputs = {'attentions': OutputRecorder(MoonshineAttention, index=1, layer_name='self_attn'), 'hidden_states': MoonshineDecoderLayer, 'cross_attentions': OutputRecorder(MoonshineAttention, index=1, layer_name='encoder_attn')}
def __init__(self, config: MoonshineConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([MoonshineDecoderLayer(config, idx) for idx in range(config.decoder_num_hidden_layers)])
self.norm = nn.LayerNorm(config.hidden_size, bias=False)
self.rotary_emb = MoonshineRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@check_model_inputs
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPast]:
"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding indices in `encoder_hidden_states`. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
if encoder_attention_mask is not None:
mask_len = encoder_hidden_states.shape[-2]
downsample_stride = 64 * 3 * 2
encoder_attention_mask = encoder_attention_mask[..., ::downsample_stride][..., :mask_len]
if self.config._attn_implementation == 'flash_attention_2':
encoder_attention_mask = encoder_attention_mask if (encoder_attention_mask == 0.0).any() else None
elif self.config._attn_implementation == 'sdpa':
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, hidden_states.dtype, hidden_states.shape[-2])
else:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, hidden_states.dtype, hidden_states.shape[-2])
for decoder_layer in self.layers:
hidden_states = decoder_layer(hidden_states, causal_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None)
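A small sketch of the mask downsampling done in `forward` above: the sample-level `encoder_attention_mask` is thinned by `downsample_stride = 64 * 3 * 2` (the product of the three conv strides in `MoonshineEncoder`) before being clipped to the encoder's output length. The one-second, 16 kHz input is an assumed example.
```python
import torch

attention_mask = torch.ones(1, 16000, dtype=torch.long)  # 1 s of 16 kHz audio, unmasked
downsample_stride = 64 * 3 * 2  # product of the encoder conv strides (384)
frame_mask = attention_mask[..., ::downsample_stride]
print(frame_mask.shape)  # torch.Size([1, 42]); then clipped to mask_len
```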
|
@auto_docstring
class MoonshineDecoder(MoonshinePreTrainedModel):
def __init__(self, config: MoonshineConfig):
pass
@check_model_inputs
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPast]:
'''
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding indices in `encoder_hidden_states`. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
'''
pass
| 5 | 1 | 48 | 5 | 34 | 9 | 7 | 0.27 | 1 | 17 | 11 | 0 | 5 | 7 | 6 | 8 | 302 | 38 | 210 | 73 | 170 | 56 | 103 | 40 | 96 | 27 | 2 | 3 | 43
|
3,981
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modeling_moonshine.py
|
transformers.models.moonshine.modeling_moonshine.MoonshineDecoderLayer
|
import torch
from ...modeling_layers import GradientCheckpointingLayer
from ...processing_utils import Unpack
from ...utils.deprecation import deprecate_kwarg
from typing import Callable, Optional, Union
import torch.nn as nn
from .configuration_moonshine import MoonshineConfig
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
class MoonshineDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MoonshineConfig, layer_idx: Optional[int]=None):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = MoonshineAttention(config=config, layer_idx=layer_idx, is_causal=True, num_attention_heads=config.decoder_num_attention_heads, num_key_value_heads=config.decoder_num_key_value_heads)
self.encoder_attn = MoonshineAttention(config=config, layer_idx=layer_idx, is_causal=False, num_attention_heads=config.decoder_num_attention_heads, num_key_value_heads=config.decoder_num_key_value_heads)
self.mlp = MoonshineDecoderMLP(config, config.decoder_hidden_act)
self.input_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
self.final_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, encoder_position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, encoder_position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states, _ = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
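Both this layer and `MoonshineEncoderLayer` below follow the same pre-norm residual pattern (normalize, run the sublayer, add back the input). A toy sketch with a linear stand-in for the attention/MLP sublayers:
```python
import torch
import torch.nn as nn

hidden = torch.randn(1, 4, 288)           # (batch, seq, hidden_size)
norm = nn.LayerNorm(288, bias=False)      # bias=False, as in the layers above
sublayer = nn.Linear(288, 288)            # stand-in for self_attn / encoder_attn / mlp
hidden = hidden + sublayer(norm(hidden))  # pre-norm residual update
```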
|
class MoonshineDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MoonshineConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, encoder_position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, encoder_position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
pass
| 4 | 0 | 42 | 5 | 36 | 2 | 2 | 0.05 | 1 | 8 | 4 | 0 | 2 | 7 | 2 | 12 | 86 | 10 | 73 | 29 | 55 | 4 | 29 | 14 | 26 | 3 | 1 | 1 | 4
|
3,982
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modeling_moonshine.py
|
transformers.models.moonshine.modeling_moonshine.MoonshineEncoder
|
import torch.nn as nn
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from .configuration_moonshine import MoonshineConfig
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from transformers.utils.generic import OutputRecorder, check_model_inputs
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
import torch
class MoonshineEncoder(MoonshinePreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_num_hidden_layers* layers. Each layer is a [`MoonshineEncoderLayer`]
Args:
config: MoonshineConfig
"""
main_input_name = 'input_values'
_can_record_outputs = {'attentions': MoonshineAttention, 'hidden_states': MoonshineEncoderLayer}
def __init__(self, config: MoonshineConfig):
super().__init__(config)
self.config = config
embed_dim = config.hidden_size
self.conv1 = nn.Conv1d(1, embed_dim, kernel_size=127, stride=64, bias=False)
self.conv2 = nn.Conv1d(embed_dim, 2 * embed_dim, kernel_size=7, stride=3)
self.conv3 = nn.Conv1d(2 * embed_dim, embed_dim, kernel_size=3, stride=2)
self.groupnorm = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=1e-05)
self.rotary_emb = MoonshineRotaryEmbedding(config=config)
self.layers = nn.ModuleList([MoonshineEncoderLayer(config, idx) for idx in range(config.encoder_num_hidden_layers)])
self.layer_norm = nn.LayerNorm(embed_dim, bias=False)
self.gradient_checkpointing = False
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.conv1
def set_input_embeddings(self, value: nn.Module):
self.conv1 = value
@check_model_inputs
def forward(self, input_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
"""
Args:
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding indices in `input_values`. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
"""
input_values = input_values.unsqueeze(1)
hidden_states = nn.functional.tanh(self.conv1(input_values))
hidden_states = self.groupnorm(hidden_states)
hidden_states = nn.functional.gelu(self.conv2(hidden_states))
hidden_states = nn.functional.gelu(self.conv3(hidden_states))
hidden_states = hidden_states.permute(0, 2, 1)
if attention_mask is not None:
mask_len = self._get_feat_extract_output_lengths(attention_mask.shape[-1])
downsample_stride = 64 * 3 * 2
attention_mask = attention_mask[..., ::downsample_stride][..., :mask_len]
if self.config._attn_implementation == 'flash_attention_2':
attention_mask = attention_mask if (attention_mask == 0.0).any() else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, hidden_states.dtype)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
position_ids = torch.arange(0, hidden_states.shape[1], device=hidden_states.device).unsqueeze(0)
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for encoder_layer in self.layers:
hidden_states = encoder_layer(hidden_states, attention_mask=attention_mask, position_ids=position_ids, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.layer_norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states)
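A shape sketch of the conv frontend in `forward` above for one second of 16 kHz audio (an assumed input; the group norm between conv1 and conv2 is omitted for brevity since it does not change shapes):
```python
import torch
import torch.nn as nn

embed_dim = 288                                # tiny-config hidden size
x = torch.randn(1, 1, 16000)                   # (batch, 1, audio_length)
conv1 = nn.Conv1d(1, embed_dim, kernel_size=127, stride=64, bias=False)
conv2 = nn.Conv1d(embed_dim, 2 * embed_dim, kernel_size=7, stride=3)
conv3 = nn.Conv1d(2 * embed_dim, embed_dim, kernel_size=3, stride=2)
h = torch.tanh(conv1(x))                       # (1, 288, 249)
h = nn.functional.gelu(conv2(h))               # (1, 576, 81)
h = nn.functional.gelu(conv3(h))               # (1, 288, 40)
print(h.permute(0, 2, 1).shape)                # torch.Size([1, 40, 288])
```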
|
class MoonshineEncoder(MoonshinePreTrainedModel):
'''
Transformer encoder consisting of *config.encoder_num_hidden_layers* layers. Each layer is a [`MoonshineEncoderLayer`]
Args:
config: MoonshineConfig
'''
def __init__(self, config: MoonshineConfig):
pass
def get_input_embeddings(self) -> nn.Module:
pass
def set_input_embeddings(self, value: nn.Module):
pass
@check_model_inputs
def forward(self, input_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
'''
Args:
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding indices in `input_values`. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
'''
pass
| 6 | 2 | 35 | 5 | 23 | 8 | 5 | 0.39 | 1 | 10 | 5 | 0 | 4 | 9 | 4 | 6 | 152 | 24 | 93 | 34 | 80 | 36 | 57 | 26 | 52 | 17 | 2 | 2 | 20
|
3,983
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modeling_moonshine.py
|
transformers.models.moonshine.modeling_moonshine.MoonshineEncoderLayer
|
from typing import Callable, Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from .configuration_moonshine import MoonshineConfig
from ...utils.deprecation import deprecate_kwarg
import torch
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...modeling_layers import GradientCheckpointingLayer
from ...processing_utils import Unpack
import torch.nn as nn
class MoonshineEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MoonshineConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = MoonshineAttention(config=config, layer_idx=layer_idx, is_causal=False, num_attention_heads=config.encoder_num_attention_heads, num_key_value_heads=config.encoder_num_key_value_heads)
self.mlp = MoonshineEncoderMLP(config, config.encoder_hidden_act)
self.input_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
|
class MoonshineEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MoonshineConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
| 4 | 0 | 28 | 4 | 24 | 2 | 2 | 0.06 | 1 | 9 | 5 | 0 | 2 | 5 | 2 | 12 | 58 | 8 | 48 | 22 | 34 | 3 | 21 | 11 | 18 | 2 | 1 | 1 | 3
|
3,984
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modeling_moonshine.py
|
transformers.models.moonshine.modeling_moonshine.MoonshineEncoderMLP
|
import torch.nn as nn
from ...activations import ACT2FN
import torch
class MoonshineEncoderMLP(nn.Module):
def __init__(self, config, hidden_act):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
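The MLP's only indirection is the `ACT2FN` lookup: the activation name from the config resolves to a callable. A quick sketch (assumes a transformers install):
```python
import torch
from transformers.activations import ACT2FN

act = ACT2FN["gelu"]        # string key from the config -> activation module
print(act(torch.zeros(2)))  # tensor([0., 0.])
```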
|
class MoonshineEncoderMLP(nn.Module):
def __init__(self, config, hidden_act):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 4 | 2 | 12 | 13 | 1 | 12 | 7 | 9 | 0 | 12 | 7 | 9 | 1 | 1 | 0 | 2
|
3,985
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modeling_moonshine.py
|
transformers.models.moonshine.modeling_moonshine.MoonshineForConditionalGeneration
|
from ...generation import GenerationMixin
import torch.nn as nn
import torch
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from .configuration_moonshine import MoonshineConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
@auto_docstring(custom_intro='\n The Moonshine Model with a language modeling head. Can be used for automatic speech recognition.\n ')
class MoonshineForConditionalGeneration(MoonshinePreTrainedModel, GenerationMixin):
_tied_weights_keys = ['proj_out.weight']
def __init__(self, config: MoonshineConfig):
super().__init__(config)
self.model = MoonshineModel(config)
self.proj_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def get_output_embeddings(self):
return self.proj_out
def set_output_embeddings(self, new_embeddings):
self.proj_out = new_embeddings
def get_input_embeddings(self) -> nn.Module:
return self.model.get_input_embeddings()
@can_return_tuple
@auto_docstring
def forward(self, input_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]]=None, decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]]=None, decoder_position_ids: Optional[tuple[torch.LongTensor]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Seq2SeqLMOutput:
"""
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
Indices of positions of each input sequence tokens in the position embeddings.
Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`
Example:
```python
>>> import torch
>>> from transformers import AutoProcessor, MoonshineForConditionalGeneration
>>> from datasets import load_dataset
>>> processor = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny")
>>> model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_values = inputs.input_values
>>> generated_ids = model.generate(input_values, max_new_tokens=100)
>>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> transcription
'Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
```"""
if labels is not None:
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
outputs: Seq2SeqModelOutput = self.model(input_values, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, decoder_position_ids=decoder_position_ids, use_cache=use_cache, cache_position=cache_position, **kwargs)
logits = self.proj_out(outputs.last_hidden_state)
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size)
return Seq2SeqLMOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
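`forward` above shifts labels into `decoder_input_ids` via `shift_tokens_right`, which is defined elsewhere in the module. A hedged sketch of the conventional behaviour (prepend the start token, drop the last label, replace `-100` padding); the helper name here is illustrative:
```python
import torch

def shift_tokens_right_sketch(labels, pad_token_id, decoder_start_token_id):
    # Assumed semantics, mirroring the usual seq2seq label-shift helper.
    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

print(shift_tokens_right_sketch(torch.tensor([[5, 6, 2]]), 0, 1))  # tensor([[1, 5, 6]])
```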
|
@auto_docstring(custom_intro='\n The Moonshine Model with a language modeling head. Can be used for automatic speech recognition.\n ')
class MoonshineForConditionalGeneration(MoonshinePreTrainedModel, GenerationMixin):
def __init__(self, config: MoonshineConfig):
pass
def get_encoder(self):
pass
def get_decoder(self):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
def get_input_embeddings(self) -> nn.Module:
pass
@can_return_tuple
@auto_docstring
def forward(self, input_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]]=None, decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]]=None, decoder_position_ids: Optional[tuple[torch.LongTensor]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Seq2SeqLMOutput:
'''
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
Indices of positions of each input sequence tokens in the position embeddings.
Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`
Example:
```python
>>> import torch
>>> from transformers import AutoProcessor, MoonshineForConditionalGeneration
>>> from datasets import load_dataset
>>> processor = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny")
>>> model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_values = inputs.input_values
>>> generated_ids = model.generate(input_values, max_new_tokens=100)
>>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> transcription
'Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
```'''
pass
| 11 | 1 | 15 | 2 | 10 | 3 | 2 | 0.29 | 2 | 7 | 4 | 0 | 7 | 2 | 7 | 9 | 118 | 21 | 75 | 32 | 49 | 22 | 31 | 15 | 23 | 7 | 2 | 2 | 13
|
3,986
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modeling_moonshine.py
|
transformers.models.moonshine.modeling_moonshine.MoonshineModel
|
from typing import Callable, Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from .configuration_moonshine import MoonshineConfig
import torch.nn as nn
from ...processing_utils import Unpack
import torch
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
@auto_docstring
class MoonshineModel(MoonshinePreTrainedModel):
def __init__(self, config: MoonshineConfig):
super().__init__(config)
self.encoder = MoonshineEncoder(config)
self.decoder = MoonshineDecoder(config)
self.post_init()
def get_input_embeddings(self):
return self.decoder.embed_tokens
def set_input_embeddings(self, value):
self.decoder.embed_tokens = value
def get_encoder(self):
return self.encoder
def freeze_encoder(self):
"""
Calling this function will disable the gradient computation for the Moonshine encoder so that its parameters will
not be updated during training.
"""
self.encoder._freeze_parameters()
def _mask_input_features(self, input_features: torch.FloatTensor, attention_mask: Optional[torch.LongTensor]=None):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://huggingface.co/papers/1904.08779).
"""
if not getattr(self.config, 'apply_spec_augment', True):
return input_features
batch_size, hidden_size, sequence_length = input_features.size()
if self.config.mask_time_prob > 0 and self.training:
mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks)
mask_time_indices = torch.tensor(mask_time_indices, device=input_features.device, dtype=torch.bool)
mask_time_indices = mask_time_indices[:, None].expand(-1, hidden_size, -1)
input_features[mask_time_indices] = 0
if self.config.mask_feature_prob > 0 and self.training:
mask_feature_indices = _compute_mask_indices((batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks)
mask_feature_indices = torch.tensor(mask_feature_indices, device=input_features.device, dtype=torch.bool)
input_features[mask_feature_indices] = 0
return input_features
@can_return_tuple
@auto_docstring
def forward(self, input_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]]=None, decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]]=None, decoder_position_ids: Optional[tuple[torch.LongTensor]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Seq2SeqModelOutput:
"""
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
Indices of positions of each input sequence tokens in the position embeddings.
Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`
Example:
```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, MoonshineModel
>>> from datasets import load_dataset
>>> model = MoonshineModel.from_pretrained("UsefulSensors/moonshine-tiny")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("UsefulSensors/moonshine-tiny")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_values = inputs.input_values
>>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
>>> last_hidden_state = model(input_values, decoder_input_ids=decoder_input_ids).last_hidden_state
>>> list(last_hidden_state.shape)
[1, 2, 288]
```
"""
if encoder_outputs is None:
encoder_outputs: BaseModelOutput = self.encoder(input_values, attention_mask=attention_mask, **kwargs)
decoder_outputs: BaseModelOutputWithPastAndCrossAttentions = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_attention_mask=attention_mask, encoder_hidden_states=encoder_outputs.last_hidden_state, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, position_ids=decoder_position_ids, use_cache=use_cache, cache_position=cache_position, **kwargs)
return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
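`freeze_encoder` above delegates to the encoder's `_freeze_parameters`; the effect is roughly the standard `requires_grad` switch, sketched here so the optimizer skips encoder weights:
```python
from transformers import MoonshineModel

model = MoonshineModel.from_pretrained("UsefulSensors/moonshine-tiny")
for param in model.get_encoder().parameters():
    param.requires_grad = False  # roughly what model.freeze_encoder() achieves
```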
|
@auto_docstring
class MoonshineModel(MoonshinePreTrainedModel):
def __init__(self, config: MoonshineConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def get_encoder(self):
pass
def freeze_encoder(self):
'''
Calling this function will disable the gradient computation for the Moonshine encoder so that its parameters will
not be updated during training.
'''
pass
def _mask_input_features(self, input_features: torch.FloatTensor, attention_mask: Optional[torch.LongTensor]=None):
'''
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://huggingface.co/papers/1904.08779).
'''
pass
@can_return_tuple
@auto_docstring
def forward(self, input_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]]=None, decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]]=None, decoder_position_ids: Optional[tuple[torch.LongTensor]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Seq2SeqModelOutput:
'''
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
Indices of positions of each input sequence tokens in the position embeddings.
Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`
Example:
```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, MoonshineModel
>>> from datasets import load_dataset
>>> model = MoonshineModel.from_pretrained("UsefulSensors/moonshine-tiny")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("UsefulSensors/moonshine-tiny")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_values = inputs.input_values
>>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
>>> last_hidden_state = model(input_values, decoder_input_ids=decoder_input_ids).last_hidden_state
>>> list(last_hidden_state.shape)
[1, 2, 288]
```
'''
pass
| 11 | 3 | 19 | 2 | 13 | 4 | 3 | 0.29 | 1 | 9 | 6 | 0 | 8 | 2 | 8 | 10 | 161 | 20 | 109 | 35 | 79 | 32 | 42 | 15 | 33 | 10 | 2 | 1 | 20
|
3,987
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modeling_moonshine.py
|
transformers.models.moonshine.modeling_moonshine.MoonshinePreTrainedModel
|
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch.nn as nn
from .configuration_moonshine import MoonshineConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
@auto_docstring
class MoonshinePreTrainedModel(PreTrainedModel):
config: MoonshineConfig
base_model_prefix = 'model'
main_input_name = 'input_values'
supports_gradient_checkpointing = True
_no_split_modules = ['MoonshineEncoderLayer', 'MoonshineDecoderLayer']
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
output_conv1_length = int((input_lengths - 127) / 64 + 1)
output_conv2_length = int((output_conv1_length - 7) / 3 + 1)
output_conv3_length = int((output_conv2_length - 3) / 2 + 1)
return output_conv3_length
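Worked through for one second of 16 kHz audio (an assumed input length), the three conv-length formulas above give 40 encoder frames:
```python
input_length = 16000                 # 1 s of 16 kHz audio (assumed)
l1 = (input_length - 127) // 64 + 1  # conv1 -> 249
l2 = (l1 - 7) // 3 + 1               # conv2 -> 81
l3 = (l2 - 3) // 2 + 1               # conv3 -> 40 encoder frames
assert (l1, l2, l3) == (249, 81, 40)
```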
|
@auto_docstring
class MoonshinePreTrainedModel(PreTrainedModel):
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
'''
Computes the output length of the convolutional layers
'''
pass
| 3 | 1 | 10 | 1 | 8 | 2 | 3 | 0.12 | 1 | 1 | 0 | 4 | 2 | 0 | 2 | 2 | 31 | 3 | 25 | 16 | 22 | 3 | 24 | 16 | 21 | 5 | 1 | 2 | 6
|
3,988
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modeling_moonshine.py
|
transformers.models.moonshine.modeling_moonshine.MoonshineRotaryEmbedding
|
import torch.nn as nn
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from .configuration_moonshine import MoonshineConfig
import torch
class MoonshineRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: MoonshineConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
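A standalone sketch of the cache math in `forward` above: an outer product of positions and inverse frequencies, duplicated so `cos`/`sin` span the full rotary width. `dim = 32` is an assumption, taking `int(36 * 0.9)` from `head_dim=36` and the default `partial_rotary_factor=0.9`.
```python
import torch

dim = 32  # assumed: int(head_dim * partial_rotary_factor) under the tiny-config defaults
inv_freq = 1.0 / (10000.0 ** (torch.arange(0, dim, 2).float() / dim))
positions = torch.arange(8).float()
freqs = torch.outer(positions, inv_freq)  # (seq_len, dim // 2)
emb = torch.cat((freqs, freqs), dim=-1)   # (seq_len, dim)
cos, sin = emb.cos(), emb.sin()
print(cos.shape)                          # torch.Size([8, 32])
```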
|
class MoonshineRotaryEmbedding(nn.Module):
def __init__(self, config: MoonshineConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5 | 0 | 18 | 2 | 13 | 5 | 3 | 0.35 | 1 | 4 | 1 | 0 | 3 | 7 | 3 | 13 | 59 | 8 | 40 | 21 | 35 | 14 | 38 | 20 | 34 | 3 | 1 | 1 | 8
|
3,989
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modular_moonshine.py
|
transformers.models.moonshine.modular_moonshine.MoonshineAttention
|
from ...utils.deprecation import deprecate_kwarg
from typing import Callable, Optional, Union
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaModel, eager_attention_forward
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
import torch.nn as nn
from ..glm.modeling_glm import GlmAttention, GlmRotaryEmbedding, apply_rotary_pos_emb
from ...processing_utils import Unpack
from ...modeling_flash_attention_utils import FlashAttentionKwargs
class MoonshineAttention(GlmAttention):
def __init__(self, config: MoonshineConfig, layer_idx: int, is_causal: bool, num_attention_heads: int, num_key_value_heads: int):
config.update({'num_attention_heads': num_attention_heads, 'num_key_value_heads': num_key_value_heads})
super().__init__(config, layer_idx)
self.is_causal = is_causal
self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
if self.config.pad_head_dim_to_multiple_of is not None:
target_multiple = self.config.pad_head_dim_to_multiple_of
target_head_dim = target_multiple * ((self.head_dim + target_multiple - 1) // target_multiple)
self.head_dim_padding = target_head_dim - self.head_dim
else:
self.head_dim_padding = 0
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, key_value_states: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len = hidden_states.shape[:-1]
query_states = self.q_proj(hidden_states).view(bsz, q_len, self.config.num_attention_heads, self.head_dim).transpose(1, 2)
is_cross_attention = key_value_states is not None
if past_key_values is not None:
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
past_key_values.is_updated[self.layer_idx] = True
past_key_values = past_key_values.cross_attention_cache
else:
past_key_values = past_key_values.self_attention_cache
current_states = key_value_states if key_value_states is not None else hidden_states
if is_cross_attention and past_key_values and is_updated:
key_states = past_key_values.layers[self.layer_idx].keys
value_states = past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states).view(bsz, -1, self.config.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = self.v_proj(current_states).view(bsz, -1, self.config.num_key_value_heads, self.head_dim).transpose(1, 2)
if is_cross_attention and past_key_values is not None:
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if not is_cross_attention:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
is_causal = self.is_causal and attention_mask is None and (q_len > 1)
if self.head_dim_padding > 0:
query_states = torch.nn.functional.pad(query_states, (0, self.head_dim_padding))
key_states = torch.nn.functional.pad(key_states, (0, self.head_dim_padding))
value_states = torch.nn.functional.pad(value_states, (0, self.head_dim_padding))
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, is_causal=is_causal, **kwargs)
if self.head_dim_padding > 0:
attn_output = attn_output[..., :-self.head_dim_padding]
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
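When `num_key_value_heads < num_attention_heads`, `eager_attention_forward` has to expand the key/value heads across the query groups. A sketch of that grouped-query repetition (the helper name is illustrative):
```python
import torch

def repeat_kv_sketch(x: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, kv_heads, seq, head_dim) -> (batch, kv_heads * n_rep, seq, head_dim)
    bsz, kv_heads, seq_len, head_dim = x.shape
    return (
        x[:, :, None]
        .expand(bsz, kv_heads, n_rep, seq_len, head_dim)
        .reshape(bsz, kv_heads * n_rep, seq_len, head_dim)
    )

kv = torch.randn(1, 2, 4, 36)         # 2 key/value heads
print(repeat_kv_sketch(kv, 4).shape)  # torch.Size([1, 8, 4, 36]) -> 8 query heads
```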
|
class MoonshineAttention(GlmAttention):
def __init__(self, config: MoonshineConfig, layer_idx: int, is_causal: bool, num_attention_heads: int, num_key_value_heads: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, key_value_states: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| 4 | 0 | 56 | 6 | 49 | 2 | 8 | 0.03 | 1 | 7 | 3 | 0 | 2 | 5 | 2 | 14 | 114 | 13 | 98 | 38 | 79 | 3 | 51 | 20 | 48 | 14 | 2 | 2 | 16
|
3,990
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modular_moonshine.py
|
transformers.models.moonshine.modular_moonshine.MoonshineConfig
|
from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation
class MoonshineConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MoonshineModel`]. It is used to instantiate a Moonshine
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Moonshine
[UsefulSensors/moonshine-tiny](https://huggingface.co/UsefulSensors/moonshine-tiny).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32768):
Vocabulary size of the Moonshine model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`MoonshineModel`].
hidden_size (`int`, *optional*, defaults to 288):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1152):
Dimension of the MLP representations.
encoder_num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
decoder_num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer decoder.
encoder_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_num_key_value_heads (`int`, *optional*):
This is the number of key/value heads that should be used to implement Grouped Query Attention. If
`encoder_num_key_value_heads=encoder_num_attention_heads`, the model will use Multi Head Attention (MHA); if
`encoder_num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group's key and value head should be constructed
by mean-pooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, it will default to
`encoder_num_attention_heads`.
decoder_num_key_value_heads (`int`, *optional*):
This is the number of key/value heads that should be used to implement Grouped Query Attention. If
`decoder_num_key_value_heads=decoder_num_attention_heads`, the model will use Multi Head Attention (MHA); if
`decoder_num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group's key and value head should be constructed
by mean-pooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, it will default to
`decoder_num_attention_heads`.
pad_head_dim_to_multiple_of (`int`, *optional*):
Pad head dimension in encoder and decoder to the next multiple of this value. Necessary for using certain
optimized attention implementations.
encoder_hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder.
decoder_hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
decoder_start_token_id (`int`, *optional*, defaults to 1):
Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids`
are provided to the `generate` function. It is used to guide the model's generation process depending on
the task.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
and expect the model to work with a longer `max_position_embeddings`, we recommend updating this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
partial_rotary_factor (`float`, *optional*, defaults to 0.9):
Percentage of the query and keys which will have rotary embedding.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the beginning-of-sequence token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the end-of-sequence token.
Example:
```python
>>> from transformers import MoonshineModel, MoonshineConfig
>>> # Initializing a Moonshine style configuration
>>> configuration = MoonshineConfig.from_pretrained("UsefulSensors/moonshine-tiny")
>>> # Initializing a model from the configuration
>>> model = MoonshineModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'moonshine'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_key_value_heads': 'encoder_num_key_value_heads', 'num_attention_heads': 'encoder_num_attention_heads', 'num_hidden_layers': 'encoder_num_hidden_layers'}
def __init__(self, vocab_size=32768, hidden_size=288, intermediate_size=1152, encoder_num_hidden_layers=6, decoder_num_hidden_layers=6, encoder_num_attention_heads=8, decoder_num_attention_heads=8, encoder_num_key_value_heads=None, decoder_num_key_value_heads=None, pad_head_dim_to_multiple_of=None, encoder_hidden_act='gelu', decoder_hidden_act='silu', max_position_embeddings=512, initializer_range=0.02, decoder_start_token_id=1, use_cache=True, rope_theta=10000.0, rope_scaling=None, partial_rotary_factor=0.9, is_encoder_decoder=True, attention_bias=False, attention_dropout=0.0, bos_token_id=1, eos_token_id=2, **kwargs):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.encoder_num_hidden_layers = encoder_num_hidden_layers
self.decoder_num_hidden_layers = decoder_num_hidden_layers
self.encoder_num_attention_heads = encoder_num_attention_heads
self.decoder_num_attention_heads = decoder_num_attention_heads
if encoder_num_key_value_heads is None:
encoder_num_key_value_heads = encoder_num_attention_heads
self.encoder_num_key_value_heads = encoder_num_key_value_heads
if decoder_num_key_value_heads is None:
decoder_num_key_value_heads = decoder_num_attention_heads
self.decoder_num_key_value_heads = decoder_num_key_value_heads
self.pad_head_dim_to_multiple_of = pad_head_dim_to_multiple_of
self.encoder_hidden_act = encoder_hidden_act
self.decoder_hidden_act = decoder_hidden_act
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.partial_rotary_factor = partial_rotary_factor
self.is_encoder_decoder = is_encoder_decoder
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
rope_config_validation(self)
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
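To make the `rope_scaling` contract above concrete, here is a minimal, hedged sketch. The 'linear' rope type and the `factor` key are taken from the docstring above; the exact validation behavior comes from `rope_config_validation`, and the chosen `max_position_embeddings` value is illustrative only:
```python
# Minimal sketch, assuming the MoonshineConfig defined above: enable linear RoPE
# scaling so the decoder can be run past the 512-token pre-trained context.
from transformers import MoonshineConfig

config = MoonshineConfig(
    max_position_embeddings=1024,  # 2x the default of 512 (illustrative)
    rope_scaling={"rope_type": "linear", "factor": 2.0},  # checked by rope_config_validation
)
print(config.rope_scaling)
```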
|
class MoonshineConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MoonshineModel`]. It is used to instantiate a Moonshine
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Moonshine
[UsefulSensors/moonshine-tiny](https://huggingface.co/UsefulSensors/moonshine-tiny).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32768):
Vocabulary size of the Moonshine model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MoonshineModel`].
hidden_size (`int`, *optional*, defaults to 288):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1152):
Dimension of the MLP representations.
encoder_num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
decoder_num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer decoder.
encoder_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`encoder_num_key_value_heads=encoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
`encoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If not specified, it will default to
`encoder_num_attention_heads`.
decoder_num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`decoder_num_key_value_heads=decoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
`decoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If not specified, it will default to
`decoder_num_attention_heads`.
pad_head_dim_to_multiple_of (`int`, *optional*):
Pad head dimension in encoder and decoder to the next multiple of this value. Necessary for using certain
optimized attention implementations.
encoder_hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder.
decoder_hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
decoder_start_token_id (`int`, *optional*, defaults to 1):
Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids`
are provided to the `generate` function. It is used to guide the model's generation process depending on
the task.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
and expect the model to work on a longer `max_position_embeddings`, we recommend updating this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to the value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2.
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2.
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to the low-frequency components of the RoPE.
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to the high-frequency components of the RoPE.
partial_rotary_factor (`float`, *optional*, defaults to 0.9):
Fraction of the query and key dimensions to which rotary embeddings are applied.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the beginning-of-sequence token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the end-of-sequence token.
Example:
```python
>>> from transformers import MoonshineModel, MoonshineConfig
>>> # Initializing a Moonshine style configuration
>>> configuration = MoonshineConfig.from_pretrained("UsefulSensors/moonshine-tiny")
>>> # Initializing a model from the configuration
>>> model = MoonshineModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=32768, hidden_size=288, intermediate_size=1152, encoder_num_hidden_layers=6, decoder_num_hidden_layers=6, encoder_num_attention_heads=8, decoder_num_attention_heads=8, encoder_num_key_value_heads=None, decoder_num_key_value_heads=None, pad_head_dim_to_multiple_of=None, encoder_hidden_act='gelu', decoder_hidden_act='silu', max_position_embeddings=512, initializer_range=0.02, decoder_start_token_id=1, use_cache=True, rope_theta=10000.0, rope_scaling=None, partial_rotary_factor=0.9, is_encoder_decoder=True, attention_bias=False, attention_dropout=0.0, bos_token_id=1, eos_token_id=2, **kwargs):
pass
| 2 | 1 | 69 | 6 | 62 | 1 | 3 | 1.69 | 1 | 1 | 0 | 0 | 1 | 22 | 1 | 33 | 203 | 15 | 70 | 54 | 41 | 118 | 33 | 27 | 31 | 3 | 2 | 1 | 3 |

3,991 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modular_moonshine.py | transformers.models.moonshine.modular_moonshine.MoonshineDecoder
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
import torch.nn as nn
from transformers.utils.generic import OutputRecorder, check_model_inputs
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from typing import Callable, Optional, Union
from ...masking_utils import create_causal_mask
import torch
from ...processing_utils import Unpack
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaModel, eager_attention_forward
class MoonshineDecoder(LlamaModel):
main_input_name = 'input_ids'
_can_record_outputs = {'attentions': OutputRecorder(MoonshineAttention, index=1, layer_name='self_attn'), 'hidden_states': MoonshineDecoderLayer, 'cross_attentions': OutputRecorder(MoonshineAttention, index=1, layer_name='encoder_attn')}
def __init__(self, config: MoonshineConfig):
super().__init__(config)
self.norm = nn.LayerNorm(config.hidden_size, bias=False)
self.layers = nn.ModuleList([MoonshineDecoderLayer(config, idx) for idx in range(config.decoder_num_hidden_layers)])
@check_model_inputs
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPast]:
"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding indices in `encoder_hidden_states`. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
if encoder_attention_mask is not None:
mask_len = encoder_hidden_states.shape[-2]
downsample_stride = 64 * 3 * 2
encoder_attention_mask = encoder_attention_mask[..., ::downsample_stride][..., :mask_len]
if self.config._attn_implementation == 'flash_attention_2':
encoder_attention_mask = encoder_attention_mask if (encoder_attention_mask == 0.0).any() else None
elif self.config._attn_implementation == 'sdpa':
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, hidden_states.dtype, hidden_states.shape[-2])
else:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, hidden_states.dtype, hidden_states.shape[-2])
for decoder_layer in self.layers:
hidden_states = decoder_layer(hidden_states, causal_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None)
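The `(input_ids is None) ^ (inputs_embeds is not None)` guard in `MoonshineDecoder.forward` is easy to misread; the short sketch below enumerates all four cases in plain Python (no model required), confirming that exactly one of the two inputs must be provided:
```python
# Truth table for the XOR guard: exactly one of input_ids / inputs_embeds may be given.
for input_ids, inputs_embeds in [(None, None), ("ids", None), (None, "emb"), ("ids", "emb")]:
    invalid = (input_ids is None) ^ (inputs_embeds is not None)
    print(f"input_ids={input_ids!r:>6} inputs_embeds={inputs_embeds!r:>6} -> {'error' if invalid else 'ok'}")
```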
|
class MoonshineDecoder(LlamaModel):
def __init__(self, config: MoonshineConfig):
pass
@check_model_inputs
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPast]:
'''
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding indices in `encoder_hidden_states`. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
'''
pass
| 4 | 1 | 77 | 10 | 58 | 10 | 14 | 0.16 | 1 | 13 | 8 | 0 | 2 | 2 | 2 | 9 | 158 | 22 | 118 | 35 | 100 | 19 | 57 | 20 | 54 | 27 | 3 | 3 | 28 |

3,992 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modular_moonshine.py | transformers.models.moonshine.modular_moonshine.MoonshineDecoderLayer
from ...processing_utils import Unpack
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_layers import GradientCheckpointingLayer
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
import torch
from ...utils.deprecation import deprecate_kwarg
from typing import Callable, Optional, Union
import torch.nn as nn
class MoonshineDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MoonshineConfig, layer_idx: Optional[int]=None):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = MoonshineAttention(config=config, layer_idx=layer_idx, is_causal=True, num_attention_heads=config.decoder_num_attention_heads, num_key_value_heads=config.decoder_num_key_value_heads)
self.encoder_attn = MoonshineAttention(config=config, layer_idx=layer_idx, is_causal=False, num_attention_heads=config.decoder_num_attention_heads, num_key_value_heads=config.decoder_num_key_value_heads)
self.mlp = MoonshineDecoderMLP(config, config.decoder_hidden_act)
self.input_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
self.final_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, encoder_position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, encoder_position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states, _ = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
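Each of the three sublayers above (self-attention, optional cross-attention, MLP) follows the same pre-norm residual pattern. A shape-only sketch, with `nn.Identity` standing in for the real sublayers and the default `hidden_size=288`:
```python
# Pre-norm residual block, the pattern MoonshineDecoderLayer applies three times.
import torch
import torch.nn as nn

hidden_size = 288
norm = nn.LayerNorm(hidden_size, bias=False)
sublayer = nn.Identity()  # stand-in for MoonshineAttention or MoonshineDecoderMLP

x = torch.randn(1, 4, hidden_size)
x = x + sublayer(norm(x))  # normalize first, transform, then add the residual
print(x.shape)  # torch.Size([1, 4, 288])
```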
|
class MoonshineDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MoonshineConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, encoder_position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, encoder_position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
pass
| 4 | 0 | 42 | 5 | 36 | 2 | 2 | 0.05 | 1 | 8 | 4 | 0 | 2 | 7 | 2 | 12 | 86 | 10 | 73 | 29 | 55 | 4 | 29 | 14 | 26 | 3 | 1 | 1 | 4 |

3,993 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modular_moonshine.py | transformers.models.moonshine.modular_moonshine.MoonshineDecoderMLP
import torch
from ...activations import ACT2FN
import torch.nn as nn
class MoonshineDecoderMLP(nn.Module):
def __init__(self, config, hidden_act):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size * 2)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states, gate = hidden_states.chunk(2, dim=-1)
hidden_states = self.activation_fn(gate) * hidden_states
hidden_states = self.fc2(hidden_states)
return hidden_states
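The decoder MLP is a gated (SwiGLU-style) feed-forward: `fc1` produces both a value half and a gate half, and the activation is applied to the gate only. A standalone sketch with plain torch, assuming the default `hidden_size=288`, `intermediate_size=1152`, and `silu`:
```python
# Standalone re-creation of MoonshineDecoderMLP.forward.
import torch
import torch.nn as nn

hidden_size, intermediate_size = 288, 1152
fc1 = nn.Linear(hidden_size, intermediate_size * 2)
fc2 = nn.Linear(intermediate_size, hidden_size)

x = torch.randn(1, 4, hidden_size)
h = fc1(x)
h, gate = h.chunk(2, dim=-1)        # split into value and gate halves
h = nn.functional.silu(gate) * h    # gate modulates the value path
out = fc2(h)
print(out.shape)  # torch.Size([1, 4, 288])
```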
|
class MoonshineDecoderMLP(nn.Module):
def __init__(self, config, hidden_act):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 4 | 2 | 12 | 14 | 1 | 13 | 8 | 10 | 0 | 13 | 8 | 10 | 1 | 1 | 0 | 2 |

3,994 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modular_moonshine.py | transformers.models.moonshine.modular_moonshine.MoonshineEncoder
import torch
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
import torch.nn as nn
from transformers.utils.generic import OutputRecorder, check_model_inputs
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
class MoonshineEncoder(MoonshinePreTrainedModel):
"""
Transformer encoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MoonshineEncoderLayer`].
Args:
config: MoonshineConfig
"""
main_input_name = 'input_values'
_can_record_outputs = {'attentions': MoonshineAttention, 'hidden_states': MoonshineEncoderLayer}
def __init__(self, config: MoonshineConfig):
super().__init__(config)
self.config = config
embed_dim = config.hidden_size
self.conv1 = nn.Conv1d(1, embed_dim, kernel_size=127, stride=64, bias=False)
self.conv2 = nn.Conv1d(embed_dim, 2 * embed_dim, kernel_size=7, stride=3)
self.conv3 = nn.Conv1d(2 * embed_dim, embed_dim, kernel_size=3, stride=2)
self.groupnorm = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=1e-05)
self.rotary_emb = MoonshineRotaryEmbedding(config=config)
self.layers = nn.ModuleList([MoonshineEncoderLayer(config, idx) for idx in range(config.encoder_num_hidden_layers)])
self.layer_norm = nn.LayerNorm(embed_dim, bias=False)
self.gradient_checkpointing = False
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.conv1
def set_input_embeddings(self, value: nn.Module):
self.conv1 = value
@check_model_inputs
def forward(self, input_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
"""
Args:
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding indices in `input_values`. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
"""
input_values = input_values.unsqueeze(1)
hidden_states = nn.functional.tanh(self.conv1(input_values))
hidden_states = self.groupnorm(hidden_states)
hidden_states = nn.functional.gelu(self.conv2(hidden_states))
hidden_states = nn.functional.gelu(self.conv3(hidden_states))
hidden_states = hidden_states.permute(0, 2, 1)
if attention_mask is not None:
mask_len = self._get_feat_extract_output_lengths(attention_mask.shape[-1])
downsample_stride = 64 * 3 * 2
attention_mask = attention_mask[..., ::downsample_stride][..., :mask_len]
if self.config._attn_implementation == 'flash_attention_2':
attention_mask = attention_mask if (attention_mask == 0.0).any() else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, hidden_states.dtype)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
position_ids = torch.arange(0, hidden_states.shape[1], device=hidden_states.device).unsqueeze(0)
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for encoder_layer in self.layers:
hidden_states = encoder_layer(hidden_states, attention_mask=attention_mask, position_ids=position_ids, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.layer_norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states)
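The mask handling above subsamples a sample-level padding mask down to encoder frames; the stride 64 * 3 * 2 = 384 is the product of the three conv strides. A worked sketch for one second of 16 kHz audio (the 40-frame count follows from `_get_feat_extract_output_lengths`, shown later):
```python
# Sketch of the encoder's mask downsampling for 16000 input samples.
import torch

attention_mask = torch.ones(1, 16000, dtype=torch.long)  # no padding
downsample_stride = 64 * 3 * 2                           # conv1 x conv2 x conv3 strides
frame_mask = attention_mask[..., ::downsample_stride]    # -> shape (1, 42)
frame_mask = frame_mask[..., :40]                        # trim to the 40 conv output frames
print(frame_mask.shape)  # torch.Size([1, 40])
```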
|
class MoonshineEncoder(MoonshinePreTrainedModel):
'''
Transformer encoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MoonshineEncoderLayer`].
Args:
config: MoonshineConfig
'''
def __init__(self, config: MoonshineConfig):
pass
def get_input_embeddings(self) -> nn.Module:
pass
def set_input_embeddings(self, value: nn.Module):
pass
@check_model_inputs
def forward(self, input_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
'''
Args:
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding indices in `input_values`. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
'''
pass
| 6 | 2 | 35 | 5 | 23 | 8 | 5 | 0.39 | 1 | 10 | 5 | 0 | 4 | 9 | 4 | 135 | 152 | 24 | 93 | 34 | 80 | 36 | 57 | 26 | 52 | 17 | 3 | 2 | 20 |

3,995 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modular_moonshine.py | transformers.models.moonshine.modular_moonshine.MoonshineEncoderLayer
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaModel, eager_attention_forward
import torch.nn as nn
class MoonshineEncoderLayer(LlamaDecoderLayer):
def __init__(self, config: MoonshineConfig, layer_idx: int):
super().__init__(config, layer_idx)
self.self_attn = MoonshineAttention(config=config, layer_idx=layer_idx, is_causal=False, num_attention_heads=config.encoder_num_attention_heads, num_key_value_heads=config.encoder_num_key_value_heads)
self.mlp = MoonshineEncoderMLP(config, config.encoder_hidden_act)
self.input_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
|
class MoonshineEncoderLayer(LlamaDecoderLayer):
def __init__(self, config: MoonshineConfig, layer_idx: int):
pass
| 2 | 0 | 14 | 2 | 12 | 0 | 1 | 0 | 1 | 5 | 3 | 0 | 1 | 4 | 1 | 13 | 15 | 2 | 13 | 6 | 11 | 0 | 7 | 6 | 5 | 1 | 2 | 0 | 1 |

3,996 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modular_moonshine.py | transformers.models.moonshine.modular_moonshine.MoonshineEncoderMLP
import torch
import torch.nn as nn
from ...activations import ACT2FN
class MoonshineEncoderMLP(nn.Module):
def __init__(self, config, hidden_act):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
class MoonshineEncoderMLP(nn.Module):
def __init__(self, config, hidden_act):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 4 | 2 | 12 | 13 | 1 | 12 | 7 | 9 | 0 | 12 | 7 | 9 | 1 | 1 | 0 | 2 |

3,997 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modular_moonshine.py | transformers.models.moonshine.modular_moonshine.MoonshineForConditionalGeneration
import torch.nn as nn
from ..whisper.modeling_whisper import WhisperModel, shift_tokens_right
import torch
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
from ...generation import GenerationMixin
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
@auto_docstring(custom_intro='\n The Moonshine Model with a language modeling head. Can be used for automatic speech recognition.\n ')
class MoonshineForConditionalGeneration(MoonshinePreTrainedModel, GenerationMixin):
_tied_weights_keys = ['proj_out.weight']
def __init__(self, config: MoonshineConfig):
super().__init__(config)
self.model = MoonshineModel(config)
self.proj_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def get_output_embeddings(self):
return self.proj_out
def set_output_embeddings(self, new_embeddings):
self.proj_out = new_embeddings
def get_input_embeddings(self) -> nn.Module:
return self.model.get_input_embeddings()
@can_return_tuple
@auto_docstring
def forward(self, input_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]]=None, decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]]=None, decoder_position_ids: Optional[tuple[torch.LongTensor]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Seq2SeqLMOutput:
"""
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
Indices of positions of each input sequence tokens in the position embeddings.
Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`
Example:
```python
>>> import torch
>>> from transformers import AutoProcessor, MoonshineForConditionalGeneration
>>> from datasets import load_dataset
>>> processor = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny")
>>> model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_values = inputs.input_values
>>> generated_ids = model.generate(input_values, max_new_tokens=100)
>>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> transcription
'Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
```"""
if labels is not None:
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
outputs: Seq2SeqModelOutput = self.model(input_values, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, decoder_position_ids=decoder_position_ids, use_cache=use_cache, cache_position=cache_position, **kwargs)
logits = self.proj_out(outputs.last_hidden_state)
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size)
return Seq2SeqLMOutput(loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
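When `labels` are passed, the decoder inputs are derived via `shift_tokens_right` and a cross-entropy loss is returned alongside the logits. A hedged usage sketch: the label ids and the random waveform are illustrative only, and the checkpoint is the one from the docstring example above:
```python
# Training-style call: labels in, loss out. Token ids here are made up.
import torch
from transformers import MoonshineForConditionalGeneration

model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
input_values = torch.randn(1, 16000)      # fake 1 s of 16 kHz audio
labels = torch.tensor([[1, 264, 9, 2]])   # illustrative target ids only
outputs = model(input_values=input_values, labels=labels)
print(outputs.loss, outputs.logits.shape)
```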
|
@auto_docstring(custom_intro='\n The Moonshine Model with a language modeling head. Can be used for automatic speech recognition.\n ')
class MoonshineForConditionalGeneration(MoonshinePreTrainedModel, GenerationMixin):
def __init__(self, config: MoonshineConfig):
pass
def get_encoder(self):
pass
def get_decoder(self):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
def get_input_embeddings(self) -> nn.Module:
pass
@can_return_tuple
@auto_docstring
def forward(self, input_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]]=None, decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]]=None, decoder_position_ids: Optional[tuple[torch.LongTensor]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Seq2SeqLMOutput:
'''
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
Indices of positions of each input sequence tokens in the position embeddings.
Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`
Example:
```python
>>> import torch
>>> from transformers import AutoProcessor, MoonshineForConditionalGeneration
>>> from datasets import load_dataset
>>> processor = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny")
>>> model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_values = inputs.input_values
>>> generated_ids = model.generate(input_values, max_new_tokens=100)
>>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> transcription
'Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
```'''
pass
| 11 | 1 | 15 | 2 | 10 | 3 | 2 | 0.29 | 2 | 7 | 4 | 0 | 7 | 2 | 7 | 138 | 118 | 21 | 75 | 32 | 49 | 22 | 31 | 15 | 23 | 7 | 3 | 2 | 13 |

3,998 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modular_moonshine.py | transformers.models.moonshine.modular_moonshine.MoonshineModel
from ...processing_utils import Unpack
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch.nn as nn
from ..whisper.modeling_whisper import WhisperModel, shift_tokens_right
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput
import torch
class MoonshineModel(WhisperModel):
@can_return_tuple
@auto_docstring
def forward(self, input_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]]=None, decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]]=None, decoder_position_ids: Optional[tuple[torch.LongTensor]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Seq2SeqModelOutput:
"""
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
Indices of positions of each input sequence tokens in the position embeddings.
Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`
Example:
```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, MoonshineModel
>>> from datasets import load_dataset
>>> model = MoonshineModel.from_pretrained("UsefulSensors/moonshine-tiny")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("UsefulSensors/moonshine-tiny")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_values = inputs.input_values
>>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
>>> last_hidden_state = model(input_values, decoder_input_ids=decoder_input_ids).last_hidden_state
>>> list(last_hidden_state.shape)
[1, 2, 288]
```
"""
if encoder_outputs is None:
encoder_outputs: BaseModelOutput = self.encoder(input_values, attention_mask=attention_mask, **kwargs)
decoder_outputs: BaseModelOutputWithPastAndCrossAttentions = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_attention_mask=attention_mask, encoder_hidden_states=encoder_outputs.last_hidden_state, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, position_ids=decoder_position_ids, use_cache=use_cache, cache_position=cache_position, **kwargs)
return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
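Because `forward` skips the encoder whenever `encoder_outputs` is supplied, the audio encoding can be computed once and reused across decoder calls. A sketch under that assumption (random waveform is a stand-in for real audio):
```python
# Reusing a precomputed encoding across decoder invocations.
import torch
from transformers import MoonshineModel

model = MoonshineModel.from_pretrained("UsefulSensors/moonshine-tiny")
input_values = torch.randn(1, 16000)                 # fake 1 s of 16 kHz audio
encoder_outputs = model.get_encoder()(input_values)  # run the conv + attention encoder once
decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]])
outputs = model(encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids)
print(outputs.last_hidden_state.shape)  # expected (1, 1, 288) for moonshine-tiny
```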
|
class MoonshineModel(WhisperModel):
@can_return_tuple
@auto_docstring
def forward(self, input_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]]=None, decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]]=None, decoder_position_ids: Optional[tuple[torch.LongTensor]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Seq2SeqModelOutput:
'''
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Float values of the raw speech waveform. Raw speech waveform can be
obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
`numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
the soundfile library (`pip install soundfile`). To prepare the array into
`input_values`, the [`AutoFeatureExtractor`] should be used for padding
and conversion into a tensor of type `torch.FloatTensor`.
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
Indices of positions of each input sequence tokens in the position embeddings.
Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`
Example:
```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, MoonshineModel
>>> from datasets import load_dataset
>>> model = MoonshineModel.from_pretrained("UsefulSensors/moonshine-tiny")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("UsefulSensors/moonshine-tiny")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_values = inputs.input_values
>>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
>>> last_hidden_state = model(input_values, decoder_input_ids=decoder_input_ids).last_hidden_state
>>> list(last_hidden_state.shape)
[1, 2, 288]
```
'''
pass
| 4 | 1 | 84 | 5 | 62 | 17 | 10 | 0.26 | 1 | 5 | 3 | 0 | 1 | 0 | 1 | 11 | 87 | 5 | 65 | 19 | 46 | 17 | 13 | 3 | 11 | 10 | 3 | 1 | 10 |

3,999 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/moonshine/modular_moonshine.py | transformers.models.moonshine.modular_moonshine.MoonshinePreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
import torch.nn as nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
@auto_docstring
class MoonshinePreTrainedModel(PreTrainedModel):
config: MoonshineConfig
base_model_prefix = 'model'
main_input_name = 'input_values'
supports_gradient_checkpointing = True
_no_split_modules = ['MoonshineEncoderLayer', 'MoonshineDecoderLayer']
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers.
"""
output_conv1_length = int((input_lengths - 127) / 64 + 1)
output_conv2_length = int((output_conv1_length - 7) / 3 + 1)
output_conv3_length = int((output_conv2_length - 3) / 2 + 1)
return output_conv3_length
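A worked example of the formula above for one second of 16 kHz audio; the kernel/stride values (127/64, 7/3, 3/2) come from the encoder's three conv layers:
```python
# 16000 samples -> 40 encoder frames, mirroring _get_feat_extract_output_lengths.
input_lengths = 16000
output_conv1_length = int((input_lengths - 127) / 64 + 1)     # 249
output_conv2_length = int((output_conv1_length - 7) / 3 + 1)  # 81
output_conv3_length = int((output_conv2_length - 3) / 2 + 1)  # 40
print(output_conv3_length)
```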
|
@auto_docstring
class MoonshinePreTrainedModel(PreTrainedModel):
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
'''
Computes the output length of the convolutional layers.
'''
pass
| 3 | 1 | 10 | 1 | 8 | 2 | 3 | 0.12 | 1 | 1 | 0 | 2 | 2 | 0 | 2 | 131 | 31 | 3 | 25 | 16 | 22 | 3 | 24 | 16 | 21 | 5 | 2 | 2 | 6 |