Dataset columns (type and observed min/max in this preview):

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length), nullable | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |

Each example row below gives these fields in this order, separated by `|`, with the metric columns summarized on a single `Metrics:` line at the end of the row.
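For readers who want to work with these columns programmatically, a minimal sketch using the `datasets` library follows. The dataset ID is not stated in this preview, so `"<dataset-id>"` is a placeholder, and `"train"` is an assumed split name.

```python
# Minimal sketch, assuming the dataset is published on the Hugging Face Hub;
# "<dataset-id>" is a placeholder (the actual ID is not given in this preview)
# and "train" is an assumed split name.
from datasets import load_dataset

ds = load_dataset("<dataset-id>", split="train")
print(ds.features)                       # column names/dtypes, matching the table above
row = ds[1700]                           # e.g. the first example shown below
print(row["repository_name"], row["class_name"])
print(row["human_written_code"][:200])   # the stored source code is a plain string
```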
1,700
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/image_processing_efficientformer.py
|
transformers.models.deprecated.efficientformer.image_processing_efficientformer.EfficientFormerImageProcessor
|
import numpy as np
from ....utils import TensorType, logging
from ....image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ....image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_batched, is_scaled_image, to_numpy_array, valid_images, validate_kwargs, validate_preprocess_arguments
from ....image_transforms import get_resize_output_image_size, resize, to_channel_dimension_format
from typing import Optional, Union
logger = logging.get_logger(__name__)
class EfficientFormerImageProcessor(BaseImageProcessor):
"""
    Constructs an EfficientFormer image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
`preprocess` method.
crop_size (`dict[str, int]` *optional*, defaults to 224):
Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize:
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, crop_size: Optional[dict[str, int]]=None, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 224, 'width': 224}
size = get_size_dict(size)
crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
self.do_resize = do_resize
self.do_rescale = do_rescale
self.do_normalize = do_normalize
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.size = size
self.resample = resample
self.rescale_factor = rescale_factor
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self._valid_processor_keys = ['images', 'do_resize', 'size', 'resample', 'do_center_crop', 'crop_size', 'do_rescale', 'rescale_factor', 'do_normalize', 'image_mean', 'image_std', 'return_tensors', 'data_format', 'input_data_format']
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample:
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if 'shortest_edge' in size:
size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False, input_data_format=input_data_format)
elif 'height' in size and 'width' in size:
size = (size['height'], size['width'])
else:
raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
return resize(image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
resizing.
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
resample = resample if resample is not None else self.resample
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size_dict = get_size_dict(size)
validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
if not is_batched(images):
images = [images]
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format) for image in images]
if do_center_crop:
images = [self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images]
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data, tensor_type=return_tensors)
|
class EfficientFormerImageProcessor(BaseImageProcessor):
'''
    Constructs an EfficientFormer image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
`preprocess` method.
crop_size (`dict[str, int]` *optional*, defaults to 224):
Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize:
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, crop_size: Optional[dict[str, int]]=None, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample:
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
Returns:
`np.ndarray`: The resized image.
'''
pass
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> BatchFeature:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
resizing.
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| Metrics: total_program_units=4, total_doc_str=3, AvgCountLine=78, AvgCountLineBlank=6, AvgCountLineCode=49, AvgCountLineComment=23, AvgCyclomatic=9, CommentToCodeRatio=0.69, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=11, CountDeclMethod=3, CountDeclMethodAll=23, CountLine=275, CountLineBlank=22, CountLineCode=150, CountLineCodeDecl=56, CountLineCodeExe=108, CountLineComment=103, CountStmt=61, CountStmtDecl=18, CountStmtExe=57, MaxCyclomatic=19, MaxInheritanceTree=3, MaxNesting=1, SumCyclomatic=27
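As a quick orientation for the row above, here is a hedged usage sketch for the `EfficientFormerImageProcessor` stored in `human_written_code`. It assumes an installed transformers version that still ships the deprecated EfficientFormer module at the path shown in `file_path`; the shape in the final comment follows from the default `size` and `crop_size` of 224.

```python
# Usage sketch (not part of the dataset row): run the processor shown above on a dummy image.
# Assumes transformers still ships the deprecated efficientformer module.
import numpy as np
from transformers.models.deprecated.efficientformer.image_processing_efficientformer import (
    EfficientFormerImageProcessor,
)

processor = EfficientFormerImageProcessor()          # defaults: resize + center crop to 224x224
dummy_image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # HWC uint8 image
batch = processor.preprocess(dummy_image, return_tensors="np")
print(batch["pixel_values"].shape)                   # expected (1, 3, 224, 224)
```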
|
1,701
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerConvMlp
|
from typing import Optional, Union
from ....activations import ACT2FN
from .configuration_efficientformer import EfficientFormerConfig
import torch
from torch import nn
class EfficientFormerConvMlp(nn.Module):
def __init__(self, config: EfficientFormerConfig, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None, drop: float=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.convolution1 = nn.Conv2d(in_features, hidden_features, 1)
self.activation = ACT2FN[config.hidden_act]
self.convolution2 = nn.Conv2d(hidden_features, out_features, 1)
self.dropout = nn.Dropout(drop)
self.batchnorm_before = nn.BatchNorm2d(hidden_features, eps=config.batch_norm_eps)
self.batchnorm_after = nn.BatchNorm2d(out_features, eps=config.batch_norm_eps)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.convolution1(hidden_state)
hidden_state = self.batchnorm_before(hidden_state)
hidden_state = self.activation(hidden_state)
hidden_state = self.dropout(hidden_state)
hidden_state = self.convolution2(hidden_state)
hidden_state = self.batchnorm_after(hidden_state)
hidden_state = self.dropout(hidden_state)
return hidden_state
|
class EfficientFormerConvMlp(nn.Module):
def __init__(self, config: EfficientFormerConfig, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None, drop: float=0.0):
pass
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
pass
| Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=16, AvgCountLineBlank=3, AvgCountLineCode=13, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=6, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=33, CountLineBlank=6, CountLineCode=27, CountLineCodeDecl=16, CountLineCodeExe=17, CountLineComment=0, CountStmt=20, CountStmtDecl=9, CountStmtExe=17, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
|
1,702
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerConvStem
|
import torch
from torch import nn
from .configuration_efficientformer import EfficientFormerConfig
class EfficientFormerConvStem(nn.Module):
def __init__(self, config: EfficientFormerConfig, out_channels: int):
super().__init__()
self.convolution1 = nn.Conv2d(config.num_channels, out_channels // 2, kernel_size=3, stride=2, padding=1)
self.batchnorm_before = nn.BatchNorm2d(out_channels // 2, eps=config.batch_norm_eps)
self.convolution2 = nn.Conv2d(out_channels // 2, out_channels, kernel_size=3, stride=2, padding=1)
self.batchnorm_after = nn.BatchNorm2d(out_channels, eps=config.batch_norm_eps)
self.activation = nn.ReLU()
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
features = self.batchnorm_before(self.convolution1(pixel_values))
features = self.activation(features)
features = self.batchnorm_after(self.convolution2(features))
features = self.activation(features)
return features
|
class EfficientFormerConvStem(nn.Module):
def __init__(self, config: EfficientFormerConfig, out_channels: int):
pass
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
pass
| Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=9, AvgCountLineBlank=2, AvgCountLineCode=7, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=19, CountLineBlank=5, CountLineCode=14, CountLineCodeDecl=9, CountLineCodeExe=11, CountLineComment=0, CountStmt=14, CountStmtDecl=9, CountStmtExe=11, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
|
1,703
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerDenseMlp
|
import torch
from ....activations import ACT2FN
from .configuration_efficientformer import EfficientFormerConfig
from torch import nn
from typing import Optional, Union
class EfficientFormerDenseMlp(nn.Module):
def __init__(self, config: EfficientFormerConfig, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.linear_in = nn.Linear(in_features, hidden_features)
self.activation = ACT2FN[config.hidden_act]
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.linear_out = nn.Linear(hidden_features, out_features)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.linear_in(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.linear_out(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class EfficientFormerDenseMlp(nn.Module):
def __init__(self, config: EfficientFormerConfig, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=12, AvgCountLineBlank=1, AvgCountLineCode=11, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=25, CountLineBlank=3, CountLineCode=22, CountLineCodeDecl=13, CountLineCodeExe=13, CountLineComment=0, CountStmt=16, CountStmtDecl=7, CountStmtExe=13, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
|
1,704
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerDropPath
|
import torch
from typing import Optional, Union
from torch import nn
class EfficientFormerDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
|
class EfficientFormerDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| Metrics: total_program_units=4, total_doc_str=1, AvgCountLine=2, AvgCountLineBlank=0, AvgCountLineCode=2, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.13, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=1, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=12, CountLineBlank=3, CountLineCode=8, CountLineCodeDecl=5, CountLineCodeExe=4, CountLineComment=1, CountStmt=8, CountStmtDecl=5, CountStmtExe=4, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3
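`EfficientFormerDropPath.forward` above calls a module-level helper `drop_path` that is defined elsewhere in `modeling_efficientformer.py` and is not included in this row. Below is a sketch of the standard stochastic-depth formulation it implements; the real helper may differ in minor details.

```python
import torch

def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Standard stochastic depth: randomly zero whole samples of the residual branch
    # during training and rescale the survivors by 1/keep_prob.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)   # broadcast over non-batch dims
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()                                 # binarize to 0/1 per sample
    return input.div(keep_prob) * random_tensor
```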
|
1,705
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerEncoder
|
from .configuration_efficientformer import EfficientFormerConfig
from torch import nn
import torch
from ....modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
class EfficientFormerEncoder(nn.Module):
def __init__(self, config: EfficientFormerConfig):
super().__init__()
self.config = config
num_intermediate_stages = len(config.depths) - 1
downsamples = [config.downsamples[i] or config.hidden_sizes[i] != config.hidden_sizes[i + 1] for i in range(num_intermediate_stages)]
intermediate_stages = []
for i in range(num_intermediate_stages):
intermediate_stages.append(EfficientFormerIntermediateStage(config, i))
if downsamples[i]:
intermediate_stages.append(EfficientFormerPatchEmbeddings(config, config.hidden_sizes[i], config.hidden_sizes[i + 1]))
self.intermediate_stages = nn.ModuleList(intermediate_stages)
self.last_stage = EfficientFormerLastStage(config)
def forward(self, hidden_states: torch.Tensor, output_hidden_states: bool=False, output_attentions: bool=False, return_dict: bool=True) -> BaseModelOutput:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
for layer_module in self.intermediate_stages:
hidden_states = layer_module(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_output = self.last_stage(hidden_states, output_attentions=output_attentions)
if output_attentions:
all_self_attentions = all_self_attentions + layer_output[1:]
if output_hidden_states:
all_hidden_states = all_hidden_states + (layer_output[0],)
if not return_dict:
return tuple((v for v in [layer_output[0], all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=layer_output[0], hidden_states=all_hidden_states, attentions=all_self_attentions)
|
class EfficientFormerEncoder(nn.Module):
def __init__(self, config: EfficientFormerConfig):
pass
def forward(self, hidden_states: torch.Tensor, output_hidden_states: bool=False, output_attentions: bool=False, return_dict: bool=True) -> BaseModelOutput:
pass
| Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=27, AvgCountLineBlank=5, AvgCountLineCode=22, AvgCountLineComment=0, AvgCyclomatic=6, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=10, CountClassCoupledModified=5, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=55, CountLineBlank=10, CountLineCode=45, CountLineCodeDecl=19, CountLineCodeExe=36, CountLineComment=0, CountStmt=30, CountStmtDecl=13, CountStmtExe=27, MaxCyclomatic=9, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=12
|
1,706
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerFlat
|
from torch import nn
import torch
class EfficientFormerFlat(nn.Module):
def __init__(self):
super().__init__()
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor]:
hidden_states = hidden_states.flatten(2).transpose(1, 2)
return hidden_states
|
class EfficientFormerFlat(nn.Module):
def __init__(self):
pass
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor]:
pass
| Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=3, AvgCountLineBlank=0, AvgCountLineCode=3, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=7, CountLineBlank=1, CountLineCode=6, CountLineCodeDecl=3, CountLineCodeExe=3, CountLineComment=0, CountStmt=6, CountStmtDecl=3, CountStmtExe=3, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
|
1,707
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerForImageClassification
|
import torch
from torch import nn
from typing import Optional, Union
from .configuration_efficientformer import EfficientFormerConfig
from ....modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
@add_start_docstrings('\n EfficientFormer Model transformer with an image classification head on top (a linear layer on top of the final\n hidden state of the [CLS] token) e.g. for ImageNet.\n ', EFFICIENTFORMER_START_DOCSTRING)
class EfficientFormerForImageClassification(EfficientFormerPreTrainedModel):
def __init__(self, config: EfficientFormerConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.efficientformer = EfficientFormerModel(config)
self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.efficientformer(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.classifier(sequence_output.mean(-2))
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@add_start_docstrings('\n EfficientFormer Model transformer with an image classification head on top (a linear layer on top of the final\n hidden state of the [CLS] token) e.g. for ImageNet.\n ', EFFICIENTFORMER_START_DOCSTRING)
class EfficientFormerForImageClassification(EfficientFormerPreTrainedModel):
def __init__(self, config: EfficientFormerConfig):
pass
@add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| Metrics: total_program_units=6, total_doc_str=1, AvgCountLine=37, AvgCountLineBlank=5, AvgCountLineCode=28, AvgCountLineComment=4, AvgCyclomatic=7, CommentToCodeRatio=0.13, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=132, CountLine=82, CountLineBlank=11, CountLineCode=63, CountLineCodeDecl=20, CountLineCodeExe=46, CountLineComment=8, CountStmt=32, CountStmtDecl=12, CountStmtExe=29, MaxCyclomatic=12, MaxInheritanceTree=3, MaxNesting=3, SumCyclomatic=14
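To see the classification head above end to end, here is a hedged sketch that instantiates the model with a fresh `EfficientFormerConfig` (randomly initialized weights, no checkpoint). It assumes the deprecated module and config class are importable and that the default config targets 224x224 inputs.

```python
# Sketch with random weights (no pretrained checkpoint); assumes the deprecated module is importable.
import torch
from transformers.models.deprecated.efficientformer.configuration_efficientformer import EfficientFormerConfig
from transformers.models.deprecated.efficientformer.modeling_efficientformer import (
    EfficientFormerForImageClassification,
)

config = EfficientFormerConfig(num_labels=10)          # default backbone, 10-way head
model = EfficientFormerForImageClassification(config).eval()
pixel_values = torch.randn(1, 3, 224, 224)             # default config expects 224x224 inputs
with torch.no_grad():
    out = model(pixel_values=pixel_values)
print(out.logits.shape)                                # expected torch.Size([1, 10])
```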
|
1,708
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerForImageClassificationWithTeacher
|
from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_efficientformer import EfficientFormerConfig
import torch
from torch import nn
from typing import Optional, Union
@add_start_docstrings('\n EfficientFormer Model transformer with image classification heads on top (a linear layer on top of the final hidden\n state of the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for\n ImageNet.\n\n <Tip warning={true}>\n\n This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet\n supported.\n\n </Tip>\n ', EFFICIENTFORMER_START_DOCSTRING)
class EfficientFormerForImageClassificationWithTeacher(EfficientFormerPreTrainedModel):
def __init__(self, config: EfficientFormerConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.efficientformer = EfficientFormerModel(config)
self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.distillation_classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=EfficientFormerForImageClassificationWithTeacherOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, EfficientFormerForImageClassificationWithTeacherOutput]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.efficientformer(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
cls_logits = self.classifier(sequence_output.mean(-2))
distillation_logits = self.distillation_classifier(sequence_output.mean(-2))
logits = (cls_logits + distillation_logits) / 2
if not return_dict:
output = (logits, cls_logits, distillation_logits) + outputs[1:]
return output
return EfficientFormerForImageClassificationWithTeacherOutput(logits=logits, cls_logits=cls_logits, distillation_logits=distillation_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@add_start_docstrings('\n EfficientFormer Model transformer with image classification heads on top (a linear layer on top of the final hidden\n state of the [CLS] token and a linear layer on top of the final hidden state of the distillation token) e.g. for\n ImageNet.\n\n <Tip warning={true}>\n\n This model supports inference-only. Fine-tuning with distillation (i.e. with a teacher) is not yet\n supported.\n\n </Tip>\n ', EFFICIENTFORMER_START_DOCSTRING)
class EfficientFormerForImageClassificationWithTeacher(EfficientFormerPreTrainedModel):
def __init__(self, config: EfficientFormerConfig):
pass
@add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=EfficientFormerForImageClassificationWithTeacherOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, EfficientFormerForImageClassificationWithTeacherOutput]:
pass
| Metrics: total_program_units=6, total_doc_str=0, AvgCountLine=25, AvgCountLineBlank=4, AvgCountLineCode=19, AvgCountLineComment=2, AvgCyclomatic=3, CommentToCodeRatio=0.09, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=132, CountLine=58, CountLineBlank=9, CountLineCode=45, CountLineCodeDecl=20, CountLineCodeExe=29, CountLineComment=4, CountStmt=19, CountStmtDecl=13, CountStmtExe=16, MaxCyclomatic=3, MaxInheritanceTree=3, MaxNesting=1, SumCyclomatic=6
|
1,709
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerForImageClassificationWithTeacherOutput
|
from typing import Optional, Union
import torch
from dataclasses import dataclass
from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
@dataclass
class EfficientFormerForImageClassificationWithTeacherOutput(ModelOutput):
"""
Output type of [`EfficientFormerForImageClassificationWithTeacher`].
Args:
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Prediction scores as the average of the cls_logits and distillation logits.
cls_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
class token).
distillation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
distillation token).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
"""
logits: Optional[torch.FloatTensor] = None
cls_logits: Optional[torch.FloatTensor] = None
distillation_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
class EfficientFormerForImageClassificationWithTeacherOutput(ModelOutput):
'''
Output type of [`EfficientFormerForImageClassificationWithTeacher`].
Args:
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Prediction scores as the average of the cls_logits and distillation logits.
cls_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Prediction scores of the classification head (i.e. the linear layer on top of the final hidden state of the
class token).
distillation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Prediction scores of the distillation head (i.e. the linear layer on top of the final hidden state of the
distillation token).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
'''
pass
| Metrics: total_program_units=2, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=3.33, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=28, CountLineBlank=2, CountLineCode=6, CountLineCodeDecl=6, CountLineCodeExe=5, CountLineComment=20, CountStmt=6, CountStmtDecl=6, CountStmtExe=5, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
|
1,710
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerIntermediateStage
|
from .configuration_efficientformer import EfficientFormerConfig
import torch
from torch import nn
class EfficientFormerIntermediateStage(nn.Module):
def __init__(self, config: EfficientFormerConfig, index: int):
super().__init__()
self.meta4D_layers = EfficientFormerMeta4DLayers(config, index)
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor]:
hidden_states = self.meta4D_layers(hidden_states)
return hidden_states
|
class EfficientFormerIntermediateStage(nn.Module):
def __init__(self, config: EfficientFormerConfig, index: int):
pass
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor]:
pass
| Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=3, AvgCountLineBlank=0, AvgCountLineCode=3, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=8, CountLineBlank=1, CountLineCode=7, CountLineCodeDecl=4, CountLineCodeExe=4, CountLineComment=0, CountStmt=7, CountStmtDecl=4, CountStmtExe=4, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
|
1,711
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerLastStage
|
from torch import nn
import torch
from .configuration_efficientformer import EfficientFormerConfig
class EfficientFormerLastStage(nn.Module):
def __init__(self, config: EfficientFormerConfig):
super().__init__()
self.meta4D_layers = EfficientFormerMeta4DLayers(config, -1)
self.flat = EfficientFormerFlat()
self.meta3D_layers = EfficientFormerMeta3DLayers(config)
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> tuple[torch.Tensor]:
hidden_states = self.meta4D_layers(hidden_states)
hidden_states = self.flat(hidden_states)
hidden_states = self.meta3D_layers(hidden_states, output_attentions)
return hidden_states
|
class EfficientFormerLastStage(nn.Module):
def __init__(self, config: EfficientFormerConfig):
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> tuple[torch.Tensor]:
pass
| Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=1, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=2, CountLineCode=11, CountLineCodeDecl=6, CountLineCodeExe=8, CountLineComment=0, CountStmt=11, CountStmtDecl=6, CountStmtExe=8, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
|
1,712
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerMeta3D
|
from .configuration_efficientformer import EfficientFormerConfig
import torch
from torch import nn
class EfficientFormerMeta3D(nn.Module):
def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float=0.0):
super().__init__()
self.token_mixer = EfficientFormerSelfAttention(dim=config.dim, key_dim=config.key_dim, num_heads=config.num_attention_heads, attention_ratio=config.attention_ratio, resolution=config.resolution)
self.layernorm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.layernorm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
self.mlp = EfficientFormerDenseMlp(config, in_features=dim, hidden_features=mlp_hidden_dim)
self.drop_path = EfficientFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = config.use_layer_scale
if config.use_layer_scale:
self.layer_scale_1 = nn.Parameter(config.layer_scale_init_value * torch.ones(dim), requires_grad=True)
self.layer_scale_2 = nn.Parameter(config.layer_scale_init_value * torch.ones(dim), requires_grad=True)
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> tuple[torch.Tensor]:
self_attention_outputs = self.token_mixer(self.layernorm1(hidden_states), output_attentions)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
if self.use_layer_scale:
layer_output = hidden_states + self.drop_path(self.layer_scale_1.unsqueeze(0).unsqueeze(0) * attention_output)
layer_output = layer_output + self.drop_path(self.layer_scale_2.unsqueeze(0).unsqueeze(0) * self.mlp(self.layernorm2(layer_output)))
else:
layer_output = hidden_states + self.drop_path(attention_output)
layer_output = layer_output + self.drop_path(self.mlp(self.layernorm2(layer_output)))
outputs = (layer_output,) + outputs
return outputs
|
class EfficientFormerMeta3D(nn.Module):
def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float=0.0):
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> tuple[torch.Tensor]:
pass
| Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=21, AvgCountLineBlank=4, AvgCountLineCode=17, AvgCountLineComment=1, AvgCyclomatic=3, CommentToCodeRatio=0.03, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=8, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=43, CountLineBlank=8, CountLineCode=35, CountLineCodeDecl=16, CountLineCodeExe=32, CountLineComment=1, CountStmt=24, CountStmtDecl=16, CountStmtExe=21, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=5
|
1,713
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerMeta3DLayers
|
from .configuration_efficientformer import EfficientFormerConfig
import torch
from torch import nn
class EfficientFormerMeta3DLayers(nn.Module):
def __init__(self, config: EfficientFormerConfig):
super().__init__()
drop_paths = [config.drop_path_rate * (block_idx + sum(config.depths[:-1])) for block_idx in range(config.num_meta3d_blocks)]
self.blocks = nn.ModuleList([EfficientFormerMeta3D(config, config.hidden_sizes[-1], drop_path=drop_path) for drop_path in drop_paths])
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> tuple[torch.Tensor]:
all_attention_outputs = () if output_attentions else None
for layer_module in self.blocks:
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
hidden_states = layer_module(hidden_states, output_attentions)
if output_attentions:
all_attention_outputs = all_attention_outputs + (hidden_states[1],)
if output_attentions:
outputs = (hidden_states[0],) + all_attention_outputs
return outputs
return hidden_states
|
class EfficientFormerMeta3DLayers(nn.Module):
def __init__(self, config: EfficientFormerConfig):
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> tuple[torch.Tensor]:
pass
| Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=13, AvgCountLineBlank=3, AvgCountLineCode=11, AvgCountLineComment=0, AvgCyclomatic=4, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=28, CountLineBlank=6, CountLineCode=22, CountLineCodeDecl=9, CountLineCodeExe=19, CountLineComment=0, CountStmt=17, CountStmtDecl=8, CountStmtExe=14, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=7
|
1,714
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerMeta4D
|
import torch
from .configuration_efficientformer import EfficientFormerConfig
from torch import nn
class EfficientFormerMeta4D(nn.Module):
def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float=0.0):
super().__init__()
pool_size = config.pool_size if config.pool_size is not None else 3
self.token_mixer = EfficientFormerPooling(pool_size=pool_size)
mlp_hidden_dim = int(dim * config.mlp_expansion_ratio)
self.mlp = EfficientFormerConvMlp(config, in_features=dim, hidden_features=mlp_hidden_dim, drop=config.hidden_dropout_prob)
self.drop_path = EfficientFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = config.use_layer_scale
if config.use_layer_scale:
self.layer_scale_1 = nn.Parameter(config.layer_scale_init_value * torch.ones(dim), requires_grad=True)
self.layer_scale_2 = nn.Parameter(config.layer_scale_init_value * torch.ones(dim), requires_grad=True)
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor]:
outputs = self.token_mixer(hidden_states)
if self.use_layer_scale:
layer_output = hidden_states + self.drop_path(self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * outputs)
layer_output = layer_output + self.drop_path(self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * self.mlp(layer_output))
else:
layer_output = hidden_states + self.drop_path(outputs)
layer_output = layer_output + self.drop_path(self.mlp(layer_output))
return layer_output
|
class EfficientFormerMeta4D(nn.Module):
def __init__(self, config: EfficientFormerConfig, dim: int, drop_path: float=0.0):
pass
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor]:
pass
| Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=14, AvgCountLineBlank=2, AvgCountLineCode=12, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=6, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=30, CountLineBlank=5, CountLineCode=25, CountLineCodeDecl=13, CountLineCodeExe=22, CountLineComment=0, CountStmt=20, CountStmtDecl=13, CountStmtExe=17, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=6
|
1,715
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerMeta4DLayers
|
import torch
from torch import nn
from .configuration_efficientformer import EfficientFormerConfig
class EfficientFormerMeta4DLayers(nn.Module):
def __init__(self, config: EfficientFormerConfig, stage_idx: int):
super().__init__()
num_layers = config.depths[stage_idx] if stage_idx != -1 else config.depths[stage_idx] - config.num_meta3d_blocks
drop_paths = [config.drop_path_rate * (block_idx + sum(config.depths[:stage_idx])) for block_idx in range(num_layers)]
self.blocks = nn.ModuleList([EfficientFormerMeta4D(config, config.hidden_sizes[stage_idx], drop_path=drop_path) for drop_path in drop_paths])
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor]:
for layer_module in self.blocks:
hidden_states = layer_module(hidden_states)
return hidden_states
|
class EfficientFormerMeta4DLayers(nn.Module):
def __init__(self, config: EfficientFormerConfig, stage_idx: int):
pass
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor]:
pass
| Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=10, AvgCountLineBlank=1, AvgCountLineCode=9, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=21, CountLineBlank=2, CountLineCode=19, CountLineCodeDecl=8, CountLineCodeExe=16, CountLineComment=0, CountStmt=10, CountStmtDecl=7, CountStmtExe=7, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4
|
1,716
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerModel
|
from ....utils import ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from torch import nn
from ....modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
import torch
from typing import Optional, Union
from .configuration_efficientformer import EfficientFormerConfig
@add_start_docstrings('The bare EfficientFormer Model transformer outputting raw hidden-states without any specific head on top.', EFFICIENTFORMER_START_DOCSTRING)
class EfficientFormerModel(EfficientFormerPreTrainedModel):
def __init__(self, config: EfficientFormerConfig):
super().__init__(config)
self.config = config
_no_split_modules = ['EfficientFormerMeta4D']
self.patch_embed = EfficientFormerConvStem(config, config.hidden_sizes[0])
self.encoder = EfficientFormerEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
self.post_init()
@add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
embedding_output = self.patch_embed(pixel_values)
encoder_outputs = self.encoder(embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
if not return_dict:
head_outputs = (sequence_output,)
return head_outputs + encoder_outputs[1:]
return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@add_start_docstrings('The bare EfficientFormer Model transformer outputting raw hidden-states without any specific head on top.', EFFICIENTFORMER_START_DOCSTRING)
class EfficientFormerModel(EfficientFormerPreTrainedModel):
def __init__(self, config: EfficientFormerConfig):
pass
@add_start_docstrings_to_model_forward(EFFICIENTFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
pass
| Metrics: total_program_units=6, total_doc_str=0, AvgCountLine=22, AvgCountLineBlank=4, AvgCountLineCode=18, AvgCountLineComment=1, AvgCyclomatic=4, CommentToCodeRatio=0.02, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=132, CountLine=54, CountLineBlank=8, CountLineCode=45, CountLineCodeDecl=19, CountLineCodeExe=28, CountLineComment=1, CountStmt=23, CountStmtDecl=12, CountStmtExe=20, MaxCyclomatic=6, MaxInheritanceTree=3, MaxNesting=1, SumCyclomatic=7
|
1,717
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerPatchEmbeddings
|
import torch
from .configuration_efficientformer import EfficientFormerConfig
from torch import nn
class EfficientFormerPatchEmbeddings(nn.Module):
"""
This class performs downsampling between two stages. For the input tensor with the shape [batch_size, num_channels,
height, width] it produces output tensor with the shape [batch_size, num_channels, height/stride, width/stride]
"""
def __init__(self, config: EfficientFormerConfig, num_channels: int, embed_dim: int, apply_norm: bool=True):
super().__init__()
self.num_channels = num_channels
self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=config.downsample_patch_size, stride=config.downsample_stride, padding=config.downsample_pad)
self.norm = nn.BatchNorm2d(embed_dim, eps=config.batch_norm_eps) if apply_norm else nn.Identity()
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
embeddings = self.projection(pixel_values)
embeddings = self.norm(embeddings)
return embeddings
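# Usage sketch (not part of the original source): a quick shape check for the
# downsampling module above, using the default downsample_* values from the deprecated
# EfficientFormer configuration. The channel counts are arbitrary example values.
import torch
from transformers.models.deprecated.efficientformer.configuration_efficientformer import EfficientFormerConfig
from transformers.models.deprecated.efficientformer.modeling_efficientformer import EfficientFormerPatchEmbeddings

config = EfficientFormerConfig()
patch_embed = EfficientFormerPatchEmbeddings(config, num_channels=48, embed_dim=96)
x = torch.randn(2, 48, 56, 56)       # [batch_size, num_channels, height, width]
print(patch_embed(x).shape)          # channels become embed_dim, spatial size shrinks by the stride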
|
class EfficientFormerPatchEmbeddings(nn.Module):
'''
    This class performs downsampling between two stages. For an input tensor with the shape [batch_size, num_channels,
    height, width] it produces an output tensor with the shape [batch_size, embed_dim, height/stride, width/stride].
'''
def __init__(self, config: EfficientFormerConfig, num_channels: int, embed_dim: int, apply_norm: bool=True):
pass
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 12
| 2
| 10
| 0
| 2
| 0.19
| 1
| 6
| 1
| 0
| 2
| 3
| 2
| 12
| 30
| 5
| 21
| 8
| 18
| 4
| 13
| 8
| 10
| 2
| 1
| 1
| 4
|
1,718
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerPooling
|
from torch import nn
import torch
class EfficientFormerPooling(nn.Module):
def __init__(self, pool_size: int):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
output = self.pool(hidden_states) - hidden_states
return output
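# Usage sketch (not part of the original source): the block above is average pooling used
# as a token mixer; its output is pool(x) - x, with the spatial shape left unchanged.
import torch
from transformers.models.deprecated.efficientformer.modeling_efficientformer import EfficientFormerPooling

pooling = EfficientFormerPooling(pool_size=3)
x = torch.randn(1, 448, 7, 7)                        # [batch, channels, height, width]
out = pooling(x)
print(out.shape)                                     # same shape as the input
print(torch.allclose(out + x, pooling.pool(x)))      # True: out is pool(x) - x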
|
class EfficientFormerPooling(nn.Module):
def __init__(self, pool_size: int):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 3
| 0
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 5
| 4
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
1,719
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerPreTrainedModel
|
from .configuration_efficientformer import EfficientFormerConfig
from ....modeling_utils import PreTrainedModel
from torch import nn
class EfficientFormerPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: EfficientFormerConfig
base_model_prefix = 'efficientformer'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = False
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
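# Usage sketch (not part of the original source): applying the initialisation hook above to
# a fresh Linear layer re-draws its weights from N(0, initializer_range) and zeroes the bias.
# A model instance is built only to provide the config that _init_weights reads.
import torch
from transformers.models.deprecated.efficientformer.configuration_efficientformer import EfficientFormerConfig
from transformers.models.deprecated.efficientformer.modeling_efficientformer import EfficientFormerModel

model = EfficientFormerModel(EfficientFormerConfig())
layer = torch.nn.Linear(16, 16)
model._init_weights(layer)
print(layer.bias.abs().max().item())                 # 0.0 - biases are zeroed
print(round(layer.weight.std().item(), 3))           # roughly config.initializer_range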
|
class EfficientFormerPreTrainedModel(PreTrainedModel):
'''
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
'''
def _init_weights(self, module: nn.Module):
'''Initialize the weights'''
pass
| 2
| 2
| 9
| 0
| 8
| 1
| 4
| 0.38
| 1
| 0
| 0
| 3
| 1
| 0
| 1
| 130
| 20
| 2
| 13
| 6
| 11
| 5
| 12
| 6
| 10
| 4
| 2
| 2
| 4
|
1,720
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/efficientformer/modeling_efficientformer.py
|
transformers.models.deprecated.efficientformer.modeling_efficientformer.EfficientFormerSelfAttention
|
import itertools
import torch
from torch import nn
class EfficientFormerSelfAttention(nn.Module):
def __init__(self, dim: int, key_dim: int, num_heads: int, attention_ratio: int, resolution: int):
super().__init__()
self.num_heads = num_heads
self.key_dim = key_dim
self.attention_ratio = attention_ratio
self.scale = key_dim ** (-0.5)
self.total_key_dim = key_dim * num_heads
self.expanded_key_dim = int(attention_ratio * key_dim)
self.total_expanded_key_dim = int(self.expanded_key_dim * num_heads)
hidden_size = self.total_expanded_key_dim + self.total_key_dim * 2
self.qkv = nn.Linear(dim, hidden_size)
self.projection = nn.Linear(self.total_expanded_key_dim, dim)
points = list(itertools.product(range(resolution), range(resolution)))
num_points = len(points)
attention_offsets = {}
idxs = []
for point_1 in points:
for point_2 in points:
offset = (abs(point_1[0] - point_2[0]), abs(point_1[1] - point_2[1]))
if offset not in attention_offsets:
attention_offsets[offset] = len(attention_offsets)
idxs.append(attention_offsets[offset])
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(num_points, num_points))
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and hasattr(self, 'ab'):
del self.ab
else:
self.ab = self.attention_biases[:, self.attention_bias_idxs]
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> tuple[torch.Tensor]:
batch_size, sequence_length, num_channels = hidden_states.shape
qkv = self.qkv(hidden_states)
query_layer, key_layer, value_layer = qkv.reshape(batch_size, sequence_length, self.num_heads, -1).split([self.key_dim, self.key_dim, self.expanded_key_dim], dim=3)
query_layer = query_layer.permute(0, 2, 1, 3)
key_layer = key_layer.permute(0, 2, 1, 3)
value_layer = value_layer.permute(0, 2, 1, 3)
if not self.training:
self.ab = self.ab.to(self.attention_biases.device)
attention_probs = torch.matmul(query_layer, key_layer.transpose(-2, -1)) * self.scale + (self.attention_biases[:, self.attention_bias_idxs] if self.training else self.ab)
attention_probs = attention_probs.softmax(dim=-1)
context_layer = torch.matmul(attention_probs, value_layer).transpose(1, 2)
context_layer = context_layer.reshape(batch_size, sequence_length, self.total_expanded_key_dim)
context_layer = self.projection(context_layer)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
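# Usage sketch (not part of the original source): one forward pass through the attention
# block above. The sizes (dim=448, key_dim=32, num_heads=8, attention_ratio=4, resolution=7)
# are plausible example values, not values asserted by this file.
import torch
from transformers.models.deprecated.efficientformer.modeling_efficientformer import EfficientFormerSelfAttention

attn = EfficientFormerSelfAttention(dim=448, key_dim=32, num_heads=8, attention_ratio=4, resolution=7).eval()
hidden = torch.randn(1, 7 * 7, 448)                  # one token per cell of the 7x7 grid
with torch.no_grad():
    context, probs = attn(hidden, output_attentions=True)
print(context.shape)                                 # (1, 49, 448): projected back to dim
print(probs.shape)                                   # (1, 8, 49, 49): per-head attention with learned biases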
|
class EfficientFormerSelfAttention(nn.Module):
def __init__(self, dim: int, key_dim: int, num_heads: int, attention_ratio: int, resolution: int):
pass
@torch.no_grad()
def train(self, mode=True):
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: bool=False) -> tuple[torch.Tensor]:
pass
| 5
| 0
| 19
| 2
| 17
| 1
| 3
| 0.04
| 1
| 7
| 0
| 0
| 3
| 11
| 3
| 13
| 62
| 8
| 52
| 30
| 47
| 2
| 46
| 29
| 42
| 4
| 1
| 3
| 10
|
1,721
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/configuration_ernie_m.py
|
transformers.models.deprecated.ernie_m.configuration_ernie_m.ErnieMConfig
|
from ....configuration_utils import PretrainedConfig
class ErnieMConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`ErnieMModel`]. It is used to instantiate a
Ernie-M model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the `Ernie-M`
[susnato/ernie-m-base_pytorch](https://huggingface.co/susnato/ernie-m-base_pytorch) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 250002):
Vocabulary size of `inputs_ids` in [`ErnieMModel`]. Also is the vocab size of token embedding matrix.
Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling
[`ErnieMModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the embedding layer, encoder layers and pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors to feed-forward layers are
firstly projected from hidden_size to intermediate_size, and then projected back to hidden_size. Typically
intermediate_size is larger than hidden_size.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the feed-forward layer. `"gelu"`, `"relu"` and any other torch
supported activation functions are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability used in `MultiHeadAttention` in all encoder layers to drop some attention target.
max_position_embeddings (`int`, *optional*, defaults to 514):
The maximum value of the dimensionality of position encoding, which dictates the maximum supported length
of an input sequence.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the normal initializer for initializing all weight matrices. The index of padding
token in the token vocabulary.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
act_dropout (`float`, *optional*, defaults to 0.0):
This dropout probability is used in `ErnieMEncoderLayer` after activation.
A normal_initializer initializes weight matrices as normal distributions. See
`ErnieMPretrainedModel._init_weights()` for how weights are initialized in `ErnieMModel`.
"""
model_type = 'ernie_m'
attribute_map: dict[str, str] = {'dropout': 'classifier_dropout', 'num_classes': 'num_labels'}
def __init__(self, vocab_size: int=250002, hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.1, attention_probs_dropout_prob: float=0.1, max_position_embeddings: int=514, initializer_range: float=0.02, pad_token_id: int=1, layer_norm_eps: float=1e-05, classifier_dropout=None, act_dropout=0.0, **kwargs):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.classifier_dropout = classifier_dropout
self.act_dropout = act_dropout
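# Usage sketch (not part of the original source): instantiating the configuration above
# with library defaults and with a deliberately tiny set of example sizes.
from transformers.models.deprecated.ernie_m.configuration_ernie_m import ErnieMConfig

config = ErnieMConfig()                                            # library defaults
tiny = ErnieMConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128)
print(config.vocab_size, config.hidden_size)                       # 250002 768
print(tiny.num_classes == tiny.num_labels)                         # True: alias declared in attribute_map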
|
class ErnieMConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`ErnieMModel`]. It is used to instantiate a
Ernie-M model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the `Ernie-M`
[susnato/ernie-m-base_pytorch](https://huggingface.co/susnato/ernie-m-base_pytorch) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 250002):
Vocabulary size of `inputs_ids` in [`ErnieMModel`]. Also is the vocab size of token embedding matrix.
Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling
[`ErnieMModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the embedding layer, encoder layers and pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors to feed-forward layers are
firstly projected from hidden_size to intermediate_size, and then projected back to hidden_size. Typically
intermediate_size is larger than hidden_size.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the feed-forward layer. `"gelu"`, `"relu"` and any other torch
supported activation functions are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability used in `MultiHeadAttention` in all encoder layers to drop some attention target.
max_position_embeddings (`int`, *optional*, defaults to 514):
The maximum value of the dimensionality of position encoding, which dictates the maximum supported length
of an input sequence.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the normal initializer for initializing all weight matrices. The index of padding
token in the token vocabulary.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
act_dropout (`float`, *optional*, defaults to 0.0):
This dropout probability is used in `ErnieMEncoderLayer` after activation.
A normal_initializer initializes weight matrices as normal distributions. See
`ErnieMPretrainedModel._init_weights()` for how weights are initialized in `ErnieMModel`.
'''
def __init__(self, vocab_size: int=250002, hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.1, attention_probs_dropout_prob: float=0.1, max_position_embeddings: int=514, initializer_range: float=0.02, pad_token_id: int=1, layer_norm_eps: float=1e-05, classifier_dropout=None, act_dropout=0.0, **kwargs):
pass
| 2
| 1
| 32
| 0
| 32
| 0
| 1
| 1.31
| 1
| 4
| 0
| 0
| 1
| 13
| 1
| 33
| 87
| 6
| 35
| 34
| 16
| 46
| 18
| 17
| 16
| 1
| 2
| 0
| 1
|
1,722
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMAttention
|
from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ....cache_utils import Cache
from ....utils.deprecation import deprecate_kwarg
from torch import nn, tensor
import torch
from typing import Optional, Union
class ErnieMAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
self.self_attn = ErnieMSelfAttention(config, position_embedding_type=position_embedding_type)
self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self_attn.num_attention_heads, self.self_attn.attention_head_size, self.pruned_heads)
self.self_attn.q_proj = prune_linear_layer(self.self_attn.q_proj, index)
self.self_attn.k_proj = prune_linear_layer(self.self_attn.k_proj, index)
self.self_attn.v_proj = prune_linear_layer(self.self_attn.v_proj, index)
self.out_proj = prune_linear_layer(self.out_proj, index, dim=1)
self.self_attn.num_attention_heads = self.self_attn.num_attention_heads - len(heads)
self.self_attn.all_head_size = self.self_attn.attention_head_size * self.self_attn.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_outputs = self.self_attn(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values, output_attentions)
attention_output = self.out_proj(self_outputs[0])
outputs = (attention_output,) + self_outputs[1:]
return outputs
|
class ErnieMAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
pass
def prune_heads(self, heads):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 5
| 0
| 15
| 1
| 13
| 1
| 1
| 0.07
| 1
| 5
| 1
| 0
| 3
| 3
| 3
| 13
| 47
| 4
| 41
| 20
| 28
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
1,723
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMEmbeddings
|
import torch
from torch import nn, tensor
from typing import Optional, Union
class ErnieMEmbeddings(nn.Module):
"""Construct the embeddings from word and position embeddings."""
def __init__(self, config):
super().__init__()
self.hidden_size = config.hidden_size
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=config.pad_token_id)
self.layer_norm = nn.LayerNorm(normalized_shape=config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
self.padding_idx = config.pad_token_id
def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if position_ids is None:
input_shape = inputs_embeds.size()[:-1]
ones = torch.ones(input_shape, dtype=torch.int64, device=inputs_embeds.device)
seq_length = torch.cumsum(ones, dim=1)
position_ids = seq_length - ones
if past_key_values_length > 0:
position_ids = position_ids + past_key_values_length
position_ids += 2
position_embeddings = self.position_embeddings(position_ids)
embeddings = inputs_embeds + position_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
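# Usage sketch (not part of the original source): running the embedding module above on a
# toy batch with a deliberately tiny config (example sizes only). Position ids are built
# from a cumulative sum and shifted by 2 before the lookup, as in the code above.
import torch
from transformers.models.deprecated.ernie_m.configuration_ernie_m import ErnieMConfig
from transformers.models.deprecated.ernie_m.modeling_ernie_m import ErnieMEmbeddings

tiny = ErnieMConfig(vocab_size=100, hidden_size=32, max_position_embeddings=64, pad_token_id=1)
embeddings = ErnieMEmbeddings(tiny)
input_ids = torch.tensor([[5, 6, 7, 1]])             # last token is the padding id
print(embeddings(input_ids=input_ids).shape)         # (1, 4, 32): [batch, seq_len, hidden_size]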
|
class ErnieMEmbeddings(nn.Module):
'''Construct the embeddings from word and position embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
pass
| 3
| 1
| 18
| 1
| 16
| 1
| 3
| 0.06
| 1
| 3
| 0
| 0
| 2
| 6
| 2
| 12
| 39
| 4
| 33
| 20
| 24
| 2
| 25
| 14
| 22
| 4
| 1
| 2
| 5
|
1,724
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMEncoder
|
from typing import Optional, Union
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from ....cache_utils import Cache
from torch import nn, tensor
class ErnieMEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layers = nn.ModuleList([ErnieMEncoderLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, input_embeds: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
hidden_states = () if output_hidden_states else None
attentions = () if output_attentions else None
output = input_embeds
if output_hidden_states:
hidden_states = hidden_states + (output,)
for i, layer in enumerate(self.layers):
layer_head_mask = head_mask[i] if head_mask is not None else None
output, opt_attn_weights = layer(hidden_states=output, attention_mask=attention_mask, head_mask=layer_head_mask, past_key_values=past_key_values[i] if past_key_values is not None else None)
if output_hidden_states:
hidden_states = hidden_states + (output,)
if output_attentions:
attentions = attentions + (opt_attn_weights,)
last_hidden_state = output
if not return_dict:
return tuple((v for v in [last_hidden_state, hidden_states, attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=attentions)
|
class ErnieMEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, input_embeds: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
pass
| 3
| 0
| 22
| 3
| 19
| 0
| 6
| 0
| 1
| 8
| 2
| 0
| 2
| 2
| 2
| 12
| 45
| 6
| 39
| 22
| 27
| 0
| 23
| 13
| 20
| 10
| 1
| 2
| 11
|
1,725
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMEncoderLayer
|
from ....activations import ACT2FN
from torch import nn, tensor
import torch
from typing import Optional, Union
from ....cache_utils import Cache
from ....utils.deprecation import deprecate_kwarg
class ErnieMEncoderLayer(nn.Module):
def __init__(self, config):
super().__init__()
dropout = 0.1 if config.hidden_dropout_prob is None else config.hidden_dropout_prob
act_dropout = config.hidden_dropout_prob if config.act_dropout is None else config.act_dropout
self.self_attn = ErnieMAttention(config)
self.linear1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.dropout = nn.Dropout(act_dropout)
self.linear2 = nn.Linear(config.intermediate_size, config.hidden_size)
self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
if isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=True):
residual = hidden_states
if output_attentions:
hidden_states, attention_opt_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, output_attentions=output_attentions)
else:
hidden_states = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, output_attentions=output_attentions)
hidden_states = residual + self.dropout1(hidden_states)
hidden_states = self.norm1(hidden_states)
residual = hidden_states
hidden_states = self.linear1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.linear2(hidden_states)
hidden_states = residual + self.dropout2(hidden_states)
hidden_states = self.norm2(hidden_states)
if output_attentions:
return (hidden_states, attention_opt_weights)
else:
return hidden_states
|
class ErnieMEncoderLayer(nn.Module):
def __init__(self, config):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=True):
pass
| 4
| 0
| 30
| 2
| 27
| 1
| 4
| 0.02
| 1
| 5
| 1
| 0
| 2
| 9
| 2
| 12
| 61
| 5
| 55
| 23
| 45
| 1
| 33
| 16
| 30
| 4
| 1
| 1
| 7
|
1,726
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMForInformationExtraction
|
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
import torch
from torch import nn, tensor
from typing import Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@add_start_docstrings('ErnieMForInformationExtraction is an Ernie-M Model with two linear layers on top of the hidden-states output to\n    compute `start_prob` and `end_prob`, designed for Universal Information Extraction.', ERNIE_M_START_DOCSTRING)
class ErnieMForInformationExtraction(ErnieMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.ernie_m = ErnieMModel(config)
self.linear_start = nn.Linear(config.hidden_size, 1)
self.linear_end = nn.Linear(config.hidden_size, 1)
self.sigmoid = nn.Sigmoid()
self.post_init()
@add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=True) -> Union[tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
"""
start_positions (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for position (index) for computing the start_positions loss. Positions outside of the sequence are
            not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) for computing the end_positions loss. Positions outside of the sequence are not
            taken into account for computing the loss.
"""
result = self.ernie_m(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
if return_dict:
sequence_output = result.last_hidden_state
elif not return_dict:
sequence_output = result[0]
start_logits = self.linear_start(sequence_output)
start_logits = start_logits.squeeze(-1)
end_logits = self.linear_end(sequence_output)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = BCEWithLogitsLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
return tuple((i for i in [total_loss, start_logits, end_logits, result.hidden_states, result.attentions] if i is not None))
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=result.hidden_states, attentions=result.attentions)
|
@add_start_docstrings('ErnieMForInformationExtraction is an Ernie-M Model with two linear layers on top of the hidden-states output to\n    compute `start_prob` and `end_prob`, designed for Universal Information Extraction.', ERNIE_M_START_DOCSTRING)
class ErnieMForInformationExtraction(ErnieMPreTrainedModel):
def __init__(self, config):
pass
@add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=True) -> Union[tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
'''
start_positions (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for position (index) for computing the start_positions loss. Positions outside of the sequence are
            not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) for computing the end_positions loss. Positions outside of the sequence are not
            taken into account for computing the loss.
'''
pass
| 5
| 1
| 40
| 3
| 32
| 5
| 4
| 0.15
| 1
| 6
| 2
| 0
| 2
| 4
| 2
| 132
| 83
| 7
| 66
| 29
| 50
| 10
| 33
| 16
| 30
| 7
| 3
| 2
| 8
|
1,727
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMForMultipleChoice
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
from torch import nn, tensor
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from typing import Optional, Union
@add_start_docstrings('ErnieM Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks.', ERNIE_M_START_DOCSTRING)
class ErnieMForMultipleChoice(ErnieMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.ernie_m = ErnieMModel(config)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, 1)
self.post_init()
@add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
@add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=True) -> Union[tuple[torch.FloatTensor], MultipleChoiceModelOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
outputs = self.ernie_m(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
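# Usage sketch (not part of the original source): the multiple-choice head above takes
# inputs laid out as [batch_size, num_choices, sequence_length] and flattens them before
# the encoder. The config sizes are tiny example values and the weights are random.
import torch
from transformers.models.deprecated.ernie_m.configuration_ernie_m import ErnieMConfig
from transformers.models.deprecated.ernie_m.modeling_ernie_m import ErnieMForMultipleChoice

tiny = ErnieMConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2,
                    intermediate_size=64, max_position_embeddings=64, pad_token_id=1)
model = ErnieMForMultipleChoice(tiny).eval()
input_ids = torch.randint(2, 100, (2, 3, 7))         # 2 examples, 3 choices, 7 tokens each
labels = torch.tensor([0, 2])                        # index of the correct choice per example
with torch.no_grad():
    out = model(input_ids=input_ids, labels=labels)
print(out.logits.shape)                              # (2, 3): one score per choice
print(out.loss.item())                               # cross-entropy over the choices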
|
@add_start_docstrings('ErnieM Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks.', ERNIE_M_START_DOCSTRING)
class ErnieMForMultipleChoice(ErnieMPreTrainedModel):
def __init__(self, config):
pass
@add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
@add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=True) -> Union[tuple[torch.FloatTensor], MultipleChoiceModelOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
'''
pass
| 6
| 1
| 37
| 5
| 29
| 4
| 6
| 0.11
| 1
| 5
| 2
| 0
| 2
| 3
| 2
| 132
| 82
| 10
| 65
| 27
| 45
| 7
| 28
| 15
| 25
| 10
| 3
| 1
| 12
|
1,728
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMForQuestionAnswering
|
from typing import Optional, Union
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from torch import nn, tensor
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@add_start_docstrings('ErnieM Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear\n    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).', ERNIE_M_START_DOCSTRING)
class ErnieMForQuestionAnswering(ErnieMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=True) -> Union[tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.ernie_m(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@add_start_docstrings('ErnieM Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear\n    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).', ERNIE_M_START_DOCSTRING)
class ErnieMForQuestionAnswering(ErnieMPreTrainedModel):
def __init__(self, config):
pass
@add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=True) -> Union[tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
'''
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
'''
pass
| 6
| 1
| 40
| 5
| 29
| 7
| 4
| 0.2
| 1
| 5
| 2
| 0
| 2
| 3
| 2
| 132
| 89
| 10
| 66
| 29
| 44
| 13
| 32
| 16
| 29
| 7
| 3
| 2
| 8
|
1,729
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMForSequenceClassification
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ....cache_utils import Cache
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from typing import Optional, Union
from torch import nn, tensor
@add_start_docstrings('ErnieM Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks.', ERNIE_M_START_DOCSTRING)
class ErnieMForSequenceClassification(ErnieMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.ernie_m = ErnieMModel(config)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=True, labels: Optional[torch.Tensor]=None) -> Union[tuple[torch.FloatTensor], SequenceClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.ernie_m(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
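# Usage sketch (not part of the original source): the head above picks its loss from
# config.problem_type; with integer labels and num_labels > 1 it falls into the
# single-label cross-entropy branch. Config sizes are tiny example values.
import torch
from transformers.models.deprecated.ernie_m.configuration_ernie_m import ErnieMConfig
from transformers.models.deprecated.ernie_m.modeling_ernie_m import ErnieMForSequenceClassification

tiny = ErnieMConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2,
                    intermediate_size=64, max_position_embeddings=64)
model = ErnieMForSequenceClassification(tiny).eval()     # num_labels defaults to 2
input_ids = torch.randint(2, 100, (2, 8))
with torch.no_grad():
    out = model(input_ids=input_ids, labels=torch.tensor([0, 1]))
print(model.config.problem_type)                         # inferred from the label dtype
print(out.logits.shape, out.loss.item())                 # (2, 2) and a scalar loss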
|
@add_start_docstrings('ErnieM Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks.', ERNIE_M_START_DOCSTRING)
class ErnieMForSequenceClassification(ErnieMPreTrainedModel):
def __init__(self, config):
pass
@add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=True, labels: Optional[torch.Tensor]=None) -> Union[tuple[torch.FloatTensor], SequenceClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 6
| 1
| 43
| 4
| 35
| 4
| 7
| 0.09
| 1
| 6
| 2
| 0
| 2
| 5
| 2
| 132
| 94
| 9
| 78
| 29
| 55
| 7
| 36
| 15
| 33
| 12
| 3
| 3
| 14
|
1,730
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMForTokenClassification
|
from ....cache_utils import Cache
from typing import Optional, Union
from torch import nn, tensor
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
@add_start_docstrings('ErnieM Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.', ERNIE_M_START_DOCSTRING)
class ErnieMForTokenClassification(ErnieMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=True, labels: Optional[torch.Tensor]=None) -> Union[tuple[torch.FloatTensor], TokenClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.ernie_m(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@add_start_docstrings('ErnieM Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.', ERNIE_M_START_DOCSTRING)
class ErnieMForTokenClassification(ErnieMPreTrainedModel):
def __init__(self, config):
pass
@add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=True, labels: Optional[torch.Tensor]=None) -> Union[tuple[torch.FloatTensor], TokenClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass
| 6
| 1
| 32
| 4
| 26
| 3
| 4
| 0.08
| 1
| 5
| 2
| 0
| 2
| 4
| 2
| 132
| 73
| 9
| 59
| 27
| 37
| 5
| 23
| 14
| 20
| 5
| 3
| 1
| 7
|
1,731
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMModel
|
from typing import Optional, Union
from ....utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
import torch
from torch import nn, tensor
from ....modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
@add_start_docstrings('The bare ErnieM Model transformer outputting raw hidden-states without any specific head on top.', ERNIE_M_START_DOCSTRING)
class ErnieMModel(ErnieMPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.initializer_range = config.initializer_range
self.embeddings = ErnieMEmbeddings(config)
self.encoder = ErnieMEncoder(config)
self.pooler = ErnieMPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layers[layer].self_attn.prune_heads(heads)
@add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(self, input_ids: Optional[tensor]=None, position_ids: Optional[tensor]=None, attention_mask: Optional[tensor]=None, head_mask: Optional[tensor]=None, inputs_embeds: Optional[tensor]=None, past_key_values: Optional[tuple[tuple[tensor]]]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time.')
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.return_dict
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = past_key_values.get_seq_length()
if attention_mask is None:
attention_mask = (input_ids == self.config.pad_token_id).to(torch.float32)
attention_mask *= torch.finfo(attention_mask.dtype).min
if past_key_values is not None:
batch_size = past_key_values[0][0].shape[0]
past_mask = torch.zeros([batch_size, 1, 1, past_key_values_length], dtype=attention_mask.dtype)
attention_mask = torch.concat([past_mask, attention_mask], dim=-1)
elif attention_mask.ndim == 2:
attention_mask = attention_mask.to(torch.float32)
attention_mask = 1.0 - attention_mask
attention_mask *= torch.finfo(attention_mask.dtype).min
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, past_key_values=past_key_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
if not return_dict:
sequence_output = encoder_outputs[0]
pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
return (sequence_output, pooler_output) + encoder_outputs[1:]
sequence_output = encoder_outputs['last_hidden_state']
pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
hidden_states = None if not output_hidden_states else encoder_outputs['hidden_states']
attentions = None if not output_attentions else encoder_outputs['attentions']
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooler_output, hidden_states=hidden_states, attentions=attentions)
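# Usage sketch (not part of the original source): an end-to-end pass through the base model
# above with a tiny random config (example sizes). A user-supplied 2-D attention_mask uses
# the usual 1 = attend / 0 = pad convention and is turned into an additive mask internally.
import torch
from transformers.models.deprecated.ernie_m.configuration_ernie_m import ErnieMConfig
from transformers.models.deprecated.ernie_m.modeling_ernie_m import ErnieMModel

tiny = ErnieMConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2,
                    intermediate_size=64, max_position_embeddings=64, pad_token_id=1)
model = ErnieMModel(tiny).eval()
input_ids = torch.tensor([[5, 6, 7, 8, 1, 1]])       # two trailing pad tokens
attention_mask = torch.tensor([[1, 1, 1, 1, 0, 0]])
with torch.no_grad():
    out = model(input_ids=input_ids, attention_mask=attention_mask)
print(out.last_hidden_state.shape)                   # (1, 6, 32)
print(out.pooler_output.shape)                       # (1, 32): dense + tanh over the first token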
|
@add_start_docstrings('The bare ErnieM Model transformer outputting raw hidden-states without any specific head on top.', ERNIE_M_START_DOCSTRING)
class ErnieMModel(ErnieMPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(self, input_ids: Optional[tensor]=None, position_ids: Optional[tensor]=None, attention_mask: Optional[tensor]=None, head_mask: Optional[tensor]=None, inputs_embeds: Optional[tensor]=None, past_key_values: Optional[tuple[tuple[tensor]]]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
pass
| 9
| 1
| 19
| 2
| 16
| 1
| 4
| 0.08
| 1
| 7
| 4
| 0
| 5
| 4
| 5
| 135
| 107
| 13
| 87
| 34
| 62
| 7
| 47
| 21
| 41
| 14
| 3
| 2
| 20
|
1,732
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMPooler
|
from torch import nn, tensor
import torch
class ErnieMPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
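ErnieMPooler only reads the hidden state of the first token and maps it through a dense layer plus tanh. A small usage sketch, reusing the class above with a minimal stand-in config (only `hidden_size` is needed; the values are made up):

```python
import torch
from types import SimpleNamespace

# Stand-in config: ErnieMPooler only reads config.hidden_size.
config = SimpleNamespace(hidden_size=8)
pooler = ErnieMPooler(config)

hidden_states = torch.randn(2, 5, 8)   # [batch, seq_len, hidden_size]
pooled = pooler(hidden_states)         # built from hidden_states[:, 0] only
print(pooled.shape)                    # torch.Size([2, 8])
```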
|
class ErnieMPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 10
| 7
| 7
| 2
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
1,733
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMPreTrainedModel
|
from torch import nn, tensor
from .configuration_ernie_m import ErnieMConfig
from ....modeling_utils import PreTrainedModel
class ErnieMPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: ErnieMConfig
base_model_prefix = 'ernie_m'
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
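`_init_weights` is applied to every submodule when the model is instantiated: Linear and Embedding weights are drawn from a normal distribution with std `initializer_range`, biases and padding rows are zeroed, and LayerNorm is reset to weight 1 / bias 0. A standalone sketch of the same scheme on a toy module (the `initializer_range` value here is an assumption, not taken from ErnieMConfig):

```python
import torch
from torch import nn

initializer_range = 0.02  # assumed value; the real one comes from ErnieMConfig

def init_weights(module):
    # Same scheme as ErnieMPreTrainedModel._init_weights (Linear / LayerNorm cases).
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

toy = nn.Sequential(nn.Linear(4, 4), nn.LayerNorm(4))
toy.apply(init_weights)   # nn.Module.apply visits every submodule
print(toy[1].weight)      # tensor of ones after re-initialization
```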
|
class ErnieMPreTrainedModel(PreTrainedModel):
'''
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
'''
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 2
| 2
| 15
| 0
| 12
| 3
| 6
| 0.47
| 1
| 0
| 0
| 6
| 1
| 0
| 1
| 130
| 24
| 2
| 15
| 4
| 13
| 7
| 13
| 4
| 11
| 6
| 2
| 2
| 6
|
1,734
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/modeling_ernie_m.py
|
transformers.models.deprecated.ernie_m.modeling_ernie_m.ErnieMSelfAttention
|
from ....utils.deprecation import deprecate_kwarg
import math
from torch import nn, tensor
import torch
from typing import Optional, Union
from ....cache_utils import Cache
class ErnieMSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.q_proj = nn.Linear(config.hidden_size, self.all_head_size)
self.k_proj = nn.Linear(config.hidden_size, self.all_head_size)
self.v_proj = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
mixed_query_layer = self.q_proj(hidden_states)
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_values is not None:
key_layer = past_key_values[0]
value_layer = past_key_values[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.k_proj(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.v_proj(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_values is not None:
key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
value_layer = self.transpose_for_scores(self.v_proj(hidden_states))
key_layer = torch.cat([past_key_values[0], key_layer], dim=2)
value_layer = torch.cat([past_key_values[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
value_layer = self.transpose_for_scores(self.v_proj(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
use_cache = past_key_values is not None
if self.is_decoder:
past_key_values = (key_layer, value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
query_length, key_length = (query_layer.shape[2], key_layer.shape[2])
if use_cache:
position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1)
else:
position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_values,)
return outputs
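Two pieces of this attention module are easy to check in isolation: `transpose_for_scores`, which splits the hidden dimension into `[num_heads, head_dim]` and moves heads ahead of the sequence axis, and the relative-position `distance` matrix used by the `relative_key` variants. A shape-only sketch with toy sizes (all values made up):

```python
import torch

batch, seq, num_heads, head_dim = 2, 4, 3, 5

# transpose_for_scores: [batch, seq, hidden] -> [batch, num_heads, seq, head_dim]
x = torch.randn(batch, seq, num_heads * head_dim)
x = x.view(batch, seq, num_heads, head_dim).permute(0, 2, 1, 3)
print(x.shape)  # torch.Size([2, 3, 4, 5])

# Relative-position distances used by the 'relative_key' / 'relative_key_query' branches.
position_ids_l = torch.arange(seq).view(-1, 1)
position_ids_r = torch.arange(seq).view(1, -1)
distance = position_ids_l - position_ids_r  # [seq, seq], values in [-(seq-1), seq-1]
print(distance)
```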
|
class ErnieMSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
pass
def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 5
| 0
| 43
| 7
| 31
| 6
| 6
| 0.19
| 1
| 5
| 0
| 0
| 3
| 11
| 3
| 13
| 132
| 22
| 93
| 44
| 80
| 18
| 72
| 35
| 68
| 13
| 1
| 2
| 17
|
1,735
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/ernie_m/tokenization_ernie_m.py
|
transformers.models.deprecated.ernie_m.tokenization_ernie_m.ErnieMTokenizer
|
import os
from typing import Any, Optional
import sentencepiece as spm
from ....utils.import_utils import requires
from ....tokenization_utils import PreTrainedTokenizer
import unicodedata
@requires(backends=('sentencepiece',))
class ErnieMTokenizer(PreTrainedTokenizer):
"""
Constructs an Ernie-M tokenizer. It uses the `sentencepiece` tools to cut words into sub-words.
Args:
sentencepiece_model_file (`str`):
The file path of sentencepiece model.
vocab_file (`str`, *optional*):
The file path of the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `False`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
A special token representing the `unknown (out-of-vocabulary)` token. An unknown token is set to be
`unk_token` in order to be converted to an ID.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
A special token separating two different sentences in the same input.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
A special token used to make arrays of tokens the same size for batching purposes.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
A special token used for sequence classification. It is the last token of the sequence when built with
special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
A special token representing a masked token. This is the token used in the masked language modeling task
which the model tries to predict the original unmasked ones.
"""
model_input_names: list[str] = ['input_ids']
vocab_files_names = VOCAB_FILES_NAMES
resource_files_names = RESOURCE_FILES_NAMES
def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding='utf8', unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.do_lower_case = do_lower_case
self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(sentencepiece_model_ckpt)
if vocab_file is not None:
self.vocab = self.load_vocab(filepath=vocab_file)
else:
self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
self.reverse_vocab = {v: k for k, v in self.vocab.items()}
super().__init__(do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
def get_offset_mapping(self, text):
if text is None:
return None
split_tokens = self.tokenize(text)
normalized_text, char_mapping = ('', [])
for i, ch in enumerate(text):
if ch in self.SP_CHAR_MAPPING:
ch = self.SP_CHAR_MAPPING.get(ch)
else:
ch = unicodedata.normalize('NFKC', ch)
if self.is_whitespace(ch):
continue
normalized_text += ch
char_mapping.extend([i] * len(ch))
text, token_mapping, offset = (normalized_text, [], 0)
if self.do_lower_case:
text = text.lower()
for token in split_tokens:
if token[:1] == '▁':
token = token[1:]
start = text[offset:].index(token) + offset
end = start + len(token)
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
offset = end
return token_mapping
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def __getstate__(self):
state = self.__dict__.copy()
state['sp_model'] = None
return state
def __setstate__(self, d):
self.__dict__ = d
if not hasattr(self, 'sp_model_kwargs'):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.sentencepiece_model_ckpt)
def clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
return ''.join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
"""Tokenize a string."""
if self.sp_model_kwargs.get('enable_sampling') is True:
enable_sampling = True
if self.sp_model_kwargs.get('alpha') is not None:
alpha = self.sp_model_kwargs.get('alpha')
if self.sp_model_kwargs.get('nbest_size') is not None:
nbest_size = self.sp_model_kwargs.get('nbest_size')
if not enable_sampling:
pieces = self.sp_model.EncodeAsPieces(text)
else:
pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
new_pieces = []
for pi, piece in enumerate(pieces):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
new_pieces.append(SPIECE_UNDERLINE)
continue
else:
continue
lst_i = 0
for i, chunk in enumerate(piece):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(chunk) or self.is_punct(chunk):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i])
new_pieces.append(chunk)
lst_i = i + 1
elif chunk.isdigit() and i > 0 and (not piece[i - 1].isdigit()):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i])
lst_i = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i])
lst_i = i
if len(piece) > lst_i:
new_pieces.append(piece[lst_i:])
return new_pieces
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
return out_string
def convert_ids_to_string(self, ids):
"""
Converts a sequence of tokens (strings for sub-words) into a single string.
"""
tokens = self.convert_ids_to_tokens(ids)
out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
return out_string
def _convert_token_to_id(self, token):
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.reverse_vocab.get(index, self.unk_token)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An ErnieM sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] [SEP] B [SEP]`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of input_id with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
_cls = [self.cls_token_id]
_sep = [self.sep_token_id]
return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
"""
Build offset map from a pair of offset map by concatenating and adding offsets of special tokens. An Ernie-M
offset_mapping has the following format:
- single sequence: `(0,0) X (0,0)`
- pair of sequences: `(0,0) A (0,0) (0,0) B (0,0)`
Args:
offset_mapping_0 (`list[tuple]`):
List of char offsets to which the special tokens will be added.
offset_mapping_1 (`list[tuple]`, *optional*):
Optional second list of wordpiece offsets for offset mapping pairs.
Returns:
`list[tuple]`: List of wordpiece offsets with the appropriate offsets of special tokens.
"""
if offset_mapping_1 is None:
return [(0, 0)] + offset_mapping_0 + [(0, 0)]
return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `encode` method.
Args:
token_ids_0 (`list[int]`):
List of ids of the first sequence.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`:
The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
if token_ids_1 is not None:
return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]
return [1] + [0] * len(token_ids_0) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create the token type IDs corresponding to the sequences passed. [What are token type
IDs?](../glossary#token-type-ids) Should be overridden in a subclass if the model has a special way of
building those.
Args:
token_ids_0 (`list[int]`):
The first tokenized sequence.
token_ids_1 (`list[int]`, *optional*):
The second tokenized sequence.
Returns:
`list[int]`: The token type ids.
"""
if token_ids_1 is None:
return (len(token_ids_0) + 2) * [0]
return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
def is_ch_char(self, char):
"""
is_ch_char
"""
if '一' <= char <= '鿿':
return True
return False
def is_alpha(self, char):
"""
is_alpha
"""
if 'a' <= char <= 'z' or 'A' <= char <= 'Z':
return True
return False
def is_punct(self, char):
"""
is_punct
"""
if char in ',;:.?!~,;:。?!《》【】':
return True
return False
def is_whitespace(self, char):
"""
is whitespace
"""
if char == ' ' or char == '\t' or char == '\n' or (char == '\r'):
return True
if len(char) == 1:
cat = unicodedata.category(char)
if cat == 'Zs':
return True
return False
def load_vocab(self, filepath):
token_to_idx = {}
with open(filepath, 'r', encoding='utf-8') as f:
for index, line in enumerate(f):
token = line.rstrip('\n')
token_to_idx[token] = int(index)
return token_to_idx
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
else:
vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(vocab_file, 'w', encoding='utf-8') as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
index = token_index
writer.write(token + '\n')
index += 1
tokenizer_model_file = os.path.join(save_directory, 'sentencepiece.bpe.model')
with open(tokenizer_model_file, 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (vocab_file,)
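The special-token helpers compose `[CLS] A [SEP]` for a single sequence and `[CLS] A [SEP] [SEP] B [SEP]` for a pair. A plain-Python sketch of the pair layout (token ids are hypothetical; no SentencePiece model is needed to see the shapes):

```python
# Hypothetical ids; the real values come from the vocabulary.
cls_id, sep_id = 101, 102
a_ids = [11, 12, 13]
b_ids = [21, 22]

# build_inputs_with_special_tokens for a pair: [CLS] A [SEP] [SEP] B [SEP]
input_ids = [cls_id] + a_ids + [sep_id] + [sep_id] + b_ids + [sep_id]

# get_special_tokens_mask: 1 for special tokens, 0 for sequence tokens.
special_tokens_mask = [1] + [0] * len(a_ids) + [1, 1] + [0] * len(b_ids) + [1]

# create_token_type_ids_from_sequences: [CLS] plus the first segment are 0,
# everything from the first [SEP] onwards is 1.
token_type_ids = [0] * (len(a_ids) + 1) + [1] * (len(b_ids) + 3)

assert len(input_ids) == len(special_tokens_mask) == len(token_type_ids)
print(input_ids)            # [101, 11, 12, 13, 102, 102, 21, 22, 102]
print(special_tokens_mask)  # [1, 0, 0, 0, 1, 1, 0, 0, 1]
print(token_type_ids)       # [0, 0, 0, 0, 1, 1, 1, 1, 1]
```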
|
@requires(backends=('sentencepiece',))
class ErnieMTokenizer(PreTrainedTokenizer):
'''
Constructs an Ernie-M tokenizer. It uses the `sentencepiece` tools to cut words into sub-words.
Args:
sentencepiece_model_file (`str`):
The file path of sentencepiece model.
vocab_file (`str`, *optional*):
The file path of the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `False`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
A special token representing the `unknown (out-of-vocabulary)` token. An unknown token is set to be
`unk_token` in order to be converted to an ID.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
A special token separating two different sentences in the same input.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
A special token used to make arrays of tokens the same size for batching purposes.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
A special token used for sequence classification. It is the last token of the sequence when built with
special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
A special token representing a masked token. This is the token used in the masked language modeling task
which the model tries to predict the original unmasked ones.
'''
def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding='utf8', unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
pass
def get_offset_mapping(self, text):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
def clean_text(self, text):
'''Performs invalid character removal and whitespace cleanup on text.'''
pass
def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
'''Tokenize a string.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (strings for sub-words) into a single string.'''
pass
def convert_ids_to_string(self, ids):
'''
Converts a sequence of tokens (strings for sub-words) into a single string.
'''
pass
def _convert_token_to_id(self, token):
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) into a token (str) using the vocab.'''
pass
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An ErnieM sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] [SEP] B [SEP]`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of input_id with the appropriate special tokens.
'''
pass
def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
'''
Build offset map from a pair of offset map by concatenating and adding offsets of special tokens. An Ernie-M
offset_mapping has the following format:
- single sequence: `(0,0) X (0,0)`
- pair of sequences: `(0,0) A (0,0) (0,0) B (0,0)`
Args:
offset_mapping_0 (`list[tuple]`):
List of char offsets to which the special tokens will be added.
offset_mapping_1 (`list[tuple]`, *optional*):
Optional second list of wordpiece offsets for offset mapping pairs.
Returns:
`list[tuple]`: List of wordpiece offsets with the appropriate offsets of special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
'''
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `encode` method.
Args:
token_ids_0 (`list[int]`):
List of ids of the first sequence.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`:
The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create the token type IDs corresponding to the sequences passed. [What are token type
IDs?](../glossary#token-type-ids) Should be overridden in a subclass if the model has a special way of
building those.
Args:
token_ids_0 (`list[int]`):
The first tokenized sequence.
token_ids_1 (`list[int]`, *optional*):
The second tokenized sequence.
Returns:
`list[int]`: The token type ids.
'''
pass
def is_ch_char(self, char):
'''
is_ch_char
'''
pass
def is_alpha(self, char):
'''
is_alpha
'''
pass
def is_punct(self, char):
'''
is_punct
'''
pass
def is_whitespace(self, char):
'''
is whitespace
'''
pass
def load_vocab(self, filepath):
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 25
| 14
| 14
| 1
| 9
| 4
| 3
| 0.5
| 1
| 8
| 0
| 0
| 22
| 7
| 22
| 111
| 365
| 52
| 209
| 80
| 170
| 104
| 167
| 60
| 144
| 17
| 3
| 4
| 67
|
1,736
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/configuration_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.configuration_gptsan_japanese.GPTSanJapaneseConfig
|
from ....configuration_utils import PretrainedConfig
class GPTSanJapaneseConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`GPTSanJapaneseModel`]. It is used to instantiate
a GPTSANJapanese model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the GPTSANJapanese
[Tanrei/GPTSAN-japanese](https://huggingface.co/Tanrei/GPTSAN-japanese) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 36000):
Vocabulary size of the GPTSANJapanese model. Defines the number of different tokens that can be represented
by the `input_ids` passed when calling [`GPTSanJapaneseModel`].
max_position_embeddings (`int`, *optional*, defaults to 1280):
The maximum sequence length that this model might ever be used with. Defaults set this to 1280.
d_model (`int`, *optional*, defaults to 1024):
Size of the encoder layers and the pooler layer.
d_ff (`int`, *optional*, defaults to 8192):
Size of the intermediate feed forward layer in each `SwitchTransformersBlock`.
d_ext (`int`, *optional*, defaults to 4096):
Size of the intermediate feed forward layer in each Extra-layers.
d_spout (`int`, *optional*, defaults to 128):
Size of the `spout` vector.
num_switch_layers (`int`, *optional*, defaults to 10):
Number of layers in the Switch Transformer layer.
num_ext_layers (`int`, *optional*, defaults to 0):
Number of layers in the Extra-layers.
num_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_experts (`int`, *optional*, defaults to 16):
Number of experts for each SwitchTransformer layer.
expert_capacity (`int`, *optional*, defaults to 128):
Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular
Transformer.
dropout_rate (`float`, *optional*, defaults to 0.0):
The ratio for all dropout layers.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
router_bias (`bool`, *optional*, defaults to `False`):
Whether to add a bias to the router.
router_jitter_noise (`float`, *optional*, defaults to 0.0):
Amount of noise to add to the router. Set it to 0.0 during prediction, or to a small value (usually 1e-2)
during training.
router_dtype (`str`, *optional*, defaults to `"float32"`):
The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
*selective precision* discussion in [the paper](https://huggingface.co/papers/2101.03961).
router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
Whether to ignore padding tokens when routing.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers.
initializer_factor (`float`, *optional*, defaults to 0.002):
A factor for initializing all weight matrices.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not to return the router logits of all experts.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
"""
model_type = 'gptsan-japanese'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-05, router_bias=False, router_jitter_noise=0.0, router_dtype='float32', router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.d_ff = d_ff
self.d_ext = d_ext
self.d_spout = d_spout
self.num_switch_layers = num_switch_layers
self.num_ext_layers = num_ext_layers
self.num_layers = num_switch_layers + num_ext_layers
self.num_heads = num_heads
self.num_experts = num_experts
self.expert_capacity = expert_capacity
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.router_bias = router_bias
self.router_jitter_noise = router_jitter_noise
self.router_dtype = router_dtype
self.router_ignore_padding_tokens = router_ignore_padding_tokens
self.output_hidden_states = output_hidden_states
self.output_attentions = output_attentions
self.initializer_factor = initializer_factor
self.output_router_logits = output_router_logits
self.use_cache = use_cache
super().__init__(separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs)
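The configuration is a plain container of hyperparameters; the only derived field is `num_layers = num_switch_layers + num_ext_layers`, and the token ids are forwarded to the `PretrainedConfig` base. A small sketch, assuming the class above (and its `PretrainedConfig` base) is importable in your environment:

```python
# Assumes GPTSanJapaneseConfig as defined above is importable.
config = GPTSanJapaneseConfig(num_switch_layers=10, num_ext_layers=2)
print(config.num_layers)                 # 12 = num_switch_layers + num_ext_layers
print(config.d_model, config.num_heads)  # 1024 16 (defaults)
print(config.separator_token_id)         # 35998, passed through to PretrainedConfig
```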
|
class GPTSanJapaneseConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`GPTSanJapaneseModel`]. It is used to instantiate
a GPTSANJapanese model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the GPTSANJapanese
[Tanrei/GPTSAN-japanese](https://huggingface.co/Tanrei/GPTSAN-japanese) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 36000):
Vocabulary size of the GPTSANJapanese model. Defines the number of different tokens that can be represented
by the `input_ids` passed when calling [`GPTSanJapaneseModel`].
max_position_embeddings (`int`, *optional*, defaults to 1280):
The maximum sequence length that this model might ever be used with. Defaults set this to 1280.
d_model (`int`, *optional*, defaults to 1024):
Size of the encoder layers and the pooler layer.
d_ff (`int`, *optional*, defaults to 8192):
Size of the intermediate feed forward layer in each `SwitchTransformersBlock`.
d_ext (`int`, *optional*, defaults to 4096):
Size of the intermediate feed forward layer in each Extra-layers.
d_spout (`int`, *optional*, defaults to 128):
Size of the `spout` vector.
num_switch_layers (`int`, *optional*, defaults to 10):
Number of layers in the Switch Transformer layer.
num_ext_layers (`int`, *optional*, defaults to 0):
Number of layers in the Extra-layers.
num_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_experts (`int`, *optional*, defaults to 16):
Number of experts for each SwitchTransformer layer.
expert_capacity (`int`, *optional*, defaults to 128):
Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular
Transformer.
dropout_rate (`float`, *optional*, defaults to 0.0):
The ratio for all dropout layers.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
router_bias (`bool`, *optional*, defaults to `False`):
Whether to add a bias to the router.
router_jitter_noise (`float`, *optional*, defaults to 0.0):
Amount of noise to add to the router. Set it to 0.0 during prediction, or to a small value (usually 1e-2)
during training.
router_dtype (`str`, *optional*, defaults to `"float32"`):
The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
*selective precision* discussion in [the paper](https://huggingface.co/papers/2101.03961).
router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
Whether to ignore padding tokens when routing.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers.
initializer_factor (`float`, *optional*, defaults to 0.002):
A factor for initializing all weight matrices.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not to return the router logits of all experts.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models)
'''
def __init__(self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-05, router_bias=False, router_jitter_noise=0.0, router_dtype='float32', router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs):
pass
| 2
| 1
| 59
| 1
| 58
| 0
| 1
| 0.85
| 1
| 1
| 0
| 0
| 1
| 23
| 1
| 33
| 131
| 5
| 68
| 56
| 38
| 58
| 29
| 28
| 27
| 1
| 2
| 0
| 1
|
1,737
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.modeling_gptsan_japanese.GPTSanJapaneseAttention
|
from typing import Optional, Union
from ....cache_utils import Cache
import torch.nn as nn
import torch
from .configuration_gptsan_japanese import GPTSanJapaneseConfig
from ....utils.deprecation import deprecate_kwarg
class GPTSanJapaneseAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[GPTSanJapaneseConfig]=None):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.is_causal = is_causal
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states) * self.scaling
if is_cross_attention and past_key_values is not None and (past_key_values[0].shape[2] == key_value_states.shape[1]):
key_states = past_key_values[0]
value_states = past_key_values[1]
elif is_cross_attention:
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_values is not None:
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_values[0], key_states], dim=2)
value_states = torch.cat([past_key_values[1], value_states], dim=2)
else:
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
past_key_values = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}')
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}')
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}')
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}')
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights_reshaped, past_key_values)
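After projection, this attention flattens the heads into the batch dimension and runs everything through `torch.bmm`, then folds the heads back at the end. A shape walk-through with toy sizes (no masking, dropout, or caching; all sizes made up):

```python
import torch

bsz, num_heads, tgt_len, src_len, head_dim = 2, 4, 3, 5, 8

# After _shape(...).view(*proj_shape): [bsz * num_heads, seq, head_dim]
query_states = torch.randn(bsz * num_heads, tgt_len, head_dim) * head_dim ** -0.5
key_states = torch.randn(bsz * num_heads, src_len, head_dim)
value_states = torch.randn(bsz * num_heads, src_len, head_dim)

attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))  # [bsz*heads, tgt, src]
attn_probs = torch.nn.functional.softmax(attn_weights, dim=-1)
attn_output = torch.bmm(attn_probs, value_states)                   # [bsz*heads, tgt, head_dim]

# Fold the heads back: [bsz, tgt_len, embed_dim]
attn_output = attn_output.view(bsz, num_heads, tgt_len, head_dim).transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, num_heads * head_dim)
print(attn_output.shape)  # torch.Size([2, 3, 32])
```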
|
class GPTSanJapaneseAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[GPTSanJapaneseConfig]=None):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 5
| 2
| 50
| 7
| 35
| 8
| 5
| 0.24
| 1
| 7
| 1
| 0
| 3
| 12
| 3
| 13
| 156
| 23
| 107
| 44
| 86
| 26
| 68
| 27
| 64
| 12
| 1
| 2
| 15
|
1,738
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.modeling_gptsan_japanese.GPTSanJapaneseBlock
|
import torch
from typing import Optional, Union
from ....utils.deprecation import deprecate_kwarg
from ....cache_utils import Cache
import torch.nn as nn
class GPTSanJapaneseBlock(nn.Module):
"""
Self Attention and FFN Unit
"""
def __init__(self, config, ext_layer=False):
super().__init__()
self.self_attn = GPTSanJapaneseLayerSelfAttention(config)
self.feed_forward = GPTSanJapaneseLayerDenseFF(config) if ext_layer else GPTSanJapaneseLayerSparseFF(config)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]], past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, output_router_tuple: Optional[bool]=False) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]], ...]:
"""
GPTSAN transformer block.
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
decoding. If `past_key_values` are used, the user can optionally input only the last
`decoder_input_ids` (those that don't have their past key value states given to this model) of shape
`(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
output_attentions (`bool`):
output attention probabilities.
output_router_tuple:
output experts router logits and expert id.
Returns:
tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...]
"""
atten_out = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions)
attention_output = atten_out[0]
if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF):
sparse_out = self.feed_forward(attention_output, output_router_tuple)
if output_router_tuple:
hidden, router_tuple = sparse_out
else:
hidden = sparse_out
else:
hidden = self.feed_forward(attention_output)
outputs = (hidden,) + atten_out[1:]
if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF) and output_router_tuple:
outputs += (router_tuple,)
return outputs
|
class GPTSanJapaneseBlock(nn.Module):
'''
Self Attention and FFN Unit
'''
def __init__(self, config, ext_layer=False):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]], past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, output_router_tuple: Optional[bool]=False) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]], ...]:
'''
GPTSAN transformer block.
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
decoding. If `past_key_values` are used, the user can optionally input only the last
`decoder_input_ids` (those that don't have their past key value states given to this model) of shape
`(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
output_attentions (`bool`):
output attention probabilities.
output_router_tuple:
output experts router logits and expert id.
Returns:
tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...]
'''
pass
| 4
| 2
| 37
| 5
| 18
| 15
| 3
| 0.92
| 1
| 6
| 3
| 0
| 2
| 2
| 2
| 12
| 80
| 11
| 36
| 19
| 24
| 33
| 18
| 10
| 15
| 4
| 1
| 2
| 6
|
1,739
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.modeling_gptsan_japanese.GPTSanJapaneseDenseActDense
|
from ....activations import ACT2FN
from .configuration_gptsan_japanese import GPTSanJapaneseConfig
import torch.nn as nn
class GPTSanJapaneseDenseActDense(nn.Module):
"""
FFN Layer for Switch Transformer and Extra layers
GPTSAN can mix Switch Transformer layers and normal Transformer layers This class is used as Expert in Switch
Transformer layers and as FFN in regular Transformer layers. ReLU is used in the Switch Transformer layers and
Swish in the normal Transformer layers; the `ext_layer` argument selects which one is used.
"""
def __init__(self, config: GPTSanJapaneseConfig, ext_layer=False):
super().__init__()
d_inter = config.d_ext if ext_layer else config.d_ff
self.wi = nn.Linear(config.d_model, d_inter, bias=ext_layer)
self.wo = nn.Linear(d_inter, config.d_model, bias=ext_layer)
self.dropout = nn.Identity() if ext_layer else nn.Dropout(config.dropout_rate)
self.act = ACT2FN['swish' if ext_layer else 'relu']
def forward(self, hidden_states):
"""
Args:
hidden_states (`torch.Tensor`) :
[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
Returns:
torch.Tensor[num_groups, tokens_per_group, hidden_dim]
"""
hidden_states = self.wi(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
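The FFN is simply `wi -> activation -> dropout -> wo`; `ext_layer` switches between ReLU (Switch layers, bias-free, with dropout) and Swish (extra layers, with bias, no dropout). A quick usage sketch reusing the class above with a minimal stand-in config (the field values are made up; running it requires `ACT2FN` from the surrounding package to be importable):

```python
import torch
from types import SimpleNamespace

# Stand-in config with only the fields GPTSanJapaneseDenseActDense reads.
config = SimpleNamespace(d_model=8, d_ff=32, d_ext=16, dropout_rate=0.0)

ffn = GPTSanJapaneseDenseActDense(config, ext_layer=False)  # ReLU + dropout, bias-free
hidden = torch.randn(2, 4, 8)                               # [batch, seq, d_model]
print(ffn(hidden).shape)                                    # torch.Size([2, 4, 8])
```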
|
class GPTSanJapaneseDenseActDense(nn.Module):
'''
FFN Layer for Switch Transformer and Extra layers
GPTSAN can mix Switch Transformer layers and normal Transformer layers This class is used as Expert in Switch
Transformer layers and as FFN in regular Transformer layers. ReLU is used in the Switch Transformer layers and
Swish in the normal Transformer layers; the `ext_layer` argument selects which one is used.
'''
def __init__(self, config: GPTSanJapaneseConfig, ext_layer=False):
pass
def forward(self, hidden_states):
'''
Args:
hidden_states (`torch.Tensor`) :
[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
Returns:
torch.Tensor[num_groups, tokens_per_group, hidden_dim]
'''
pass
| 3
| 2
| 11
| 1
| 7
| 4
| 3
| 0.93
| 1
| 2
| 1
| 0
| 2
| 4
| 2
| 12
| 32
| 5
| 14
| 8
| 11
| 13
| 14
| 8
| 11
| 4
| 1
| 0
| 5
|
1,740
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.modeling_gptsan_japanese.GPTSanJapaneseForConditionalGeneration
|
from typing import Optional, Union
import torch.nn as nn
from .configuration_gptsan_japanese import GPTSanJapaneseConfig
from ....cache_utils import Cache
from ....utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging
from ....modeling_outputs import MoECausalLMOutputWithPast, MoEModelOutputWithPastAndCrossAttentions
import torch
@add_start_docstrings('The bare GPTSAN-japanese Model with a language modeling head.', GPTSAN_JAPANESE_START_DOCSTRING)
class GPTSanJapaneseForConditionalGeneration(GPTSanJapanesePreTrainedModel):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config: GPTSanJapaneseConfig):
super().__init__(config)
self.model = GPTSanJapaneseModel(config)
self.register_buffer('final_logits_bias', torch.zeros([1, config.vocab_size]))
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
if not self.config.torchscript:
self.lm_head.weight = self.model.embed_tokens.weight
@add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.FloatTensor]=None, spout: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, output_router_logits: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], MoECausalLMOutputWithPast]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
labels in `[0, ..., config.vocab_size]`
Returns:
`MoECausalLMOutputWithPast` if `return_dict` is `True`, otherwise a `tuple`.
Example:
Text Generation with regular LM Model
```python
>>> from transformers import AutoModel, AutoTokenizer, trainer_utils
>>> device = "cuda"
>>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
>>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> x_token = tokenizer("織田信長は、", return_tensors="pt")
>>> trainer_utils.set_seed(30)
>>> input_ids = x_token.input_ids.to(device)
>>> gen_token = model.generate(input_ids, max_new_tokens=50)
>>> tokenizer.decode(gen_token[0])
"織田信長は、政治・軍事の中枢まで掌握した政治家であり、日本史上類を見ない驚異的な軍事侵攻を続け..."
```
Text Generation with Prefix-LM Model
```python
>>> from transformers import AutoModel, AutoTokenizer, trainer_utils
>>> device = "cuda"
>>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
>>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> x_token = tokenizer("", prefix_text="織田信長は、", return_tensors="pt")
>>> trainer_utils.set_seed(30)
>>> input_ids = x_token.input_ids.to(device)
>>> token_type_ids = x_token.token_type_ids.to(device)
>>> gen_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50)
>>> tokenizer.decode(gen_token[0])
"織田信長は、政治・外交で数々の戦果を上げるが、1568年からは、いわゆる本能寺の変で細川晴元に暗殺される..."
```
Simultaneously Text Generation And Masked Language Model
```python
>>> from transformers import AutoModel, AutoTokenizer, trainer_utils
>>> device = "cuda"
>>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
>>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> masked_sentence = "武田信玄は、<|inputmask|>時代ファンならぜひ押さえ<|inputmask|>きたい名将の一人。"
>>> x_token = tokenizer("", prefix_text=masked_sentence, return_tensors="pt")
>>> trainer_utils.set_seed(30)
>>> input_ids = x_token.input_ids.to(device)
>>> token_type_ids = x_token.token_type_ids.to(device)
>>> out_lm_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50)
>>> out_mlm_token = model(input_ids, token_type_ids=token_type_ids).logits.argmax(axis=-1)
>>> tokenizer.decode(out_mlm_token[0])
"武田信玄は、戦国時代ファンならぜひ押さえておきたい名将の一人。"
>>> tokenizer.decode(out_lm_token[0][input_ids.shape[1] :])
"武田氏の三代に渡った武田家のひとり\\n甲斐市に住む、日本史上最大の戦国大名。..."
```"""
SEG_TOKEN = self.config.separator_token_id
use_cache = use_cache or self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
model_return_dict = True
num_precontext = None
if input_ids is not None:
num_batch = input_ids.shape[0]
num_precontext = torch.zeros([num_batch]).int().to(input_ids.device)
where_separators = torch.where(input_ids == SEG_TOKEN)
num_precontext[where_separators[0]] += where_separators[1]
num_precontext = num_precontext.unsqueeze(1)
outputs = self.model(input_ids, attention_mask, token_type_ids, spout, past_key_values, head_mask, use_cache, inputs_embeds, decoder_inputs_embeds, output_attentions, output_hidden_states, model_return_dict, output_router_logits, num_precontext)
lm_logits = self.lm_head(outputs[0])
if lm_logits.shape[-1] == self.final_logits_bias.shape[-1]:
lm_logits = lm_logits + self.final_logits_bias
loss = None
z_loss = None
router_probs = None
aux_loss = None
if labels is not None:
labels = labels.to(lm_logits.device)
loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
if output_router_logits:
router_logits, expert_indexes = self._unpack_router_logits(outputs.router_probs)
z_loss = router_z_loss_func(router_logits)
router_probs = nn.Softmax(dim=-1)(router_logits)
aux_loss = load_balancing_loss_func(router_probs, expert_indexes)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
if not return_dict:
return tuple((v for v in [loss, lm_logits, outputs.past_key_values, outputs.hidden_states, outputs.router_probs, z_loss, aux_loss] if v is not None))
return MoECausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_probs, z_loss=z_loss, aux_loss=aux_loss)
def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, attention_mask: torch.FloatTensor, token_type_ids: Optional[torch.FloatTensor]=None, spout: Optional[Union[list, torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, **kwargs):
if isinstance(spout, list):
spout = torch.tensor(spout).float()
if input_ids is not None:
spout = spout.to(input_ids.device)
if past_key_values is not None:
return {'input_ids': input_ids[:, -1:] if input_ids is not None else None, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids[:, -1:] if token_type_ids is not None else None, 'spout': spout, 'past_key_values': past_key_values}
return {'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, 'spout': spout, 'past_key_values': None}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
self._resize_final_logits_bias(new_embeddings.weight.shape[0])
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer('final_logits_bias', new_bias)
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
self.model.set_input_embeddings(new_embeddings)
def _unpack_router_logits(self, router_outputs):
total_router_logits = []
total_expert_indexes = []
for router_output in router_outputs:
if len(router_output[0].shape) > 1:
router_logits, expert_indexes = router_output
total_router_logits.append(router_logits)
total_expert_indexes.append(expert_indexes)
return (torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1))
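# --- Illustrative sketch (not part of the original source) -----------------
# The forward pass above returns `z_loss` and `aux_loss` next to the
# cross-entropy `loss` without combining them. A minimal sketch of the
# Switch-Transformer-style auxiliary losses, assuming the standard
# formulation; the helpers used above (`router_z_loss_func`,
# `load_balancing_loss_func`) may differ in detail.
import torch
import torch.nn.functional as F

def sketch_router_z_loss(router_logits: torch.Tensor) -> torch.Tensor:
    # router_logits: [batch, seq_len, num_experts]; penalizes large router logits.
    log_z = torch.logsumexp(router_logits, dim=-1)
    return (log_z ** 2).mean()

def sketch_load_balancing_loss(router_probs: torch.Tensor, expert_index: torch.Tensor) -> torch.Tensor:
    # router_probs: [batch, seq_len, num_experts]; expert_index: [batch, seq_len] (argmax indices).
    num_experts = router_probs.shape[-1]
    expert_mask = F.one_hot(expert_index, num_experts).float()
    tokens_per_expert = expert_mask.mean(dim=-2)   # fraction of tokens routed to each expert
    probs_per_expert = router_probs.mean(dim=-2)   # mean router probability per expert
    return (tokens_per_expert * probs_per_expert).mean() * num_experts ** 2

# Hypothetical combination (the coefficients are illustrative only):
# total_loss = loss + 0.001 * z_loss + 0.01 * aux_loss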
|
@add_start_docstrings('The bare GPTSAN-japanese Model with a language modeling head.', GPTSAN_JAPANESE_START_DOCSTRING)
class GPTSanJapaneseForConditionalGeneration(GPTSanJapanesePreTrainedModel):
def __init__(self, config: GPTSanJapaneseConfig):
pass
@add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.FloatTensor]=None, spout: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, output_router_logits: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[tuple[torch.FloatTensor], MoECausalLMOutputWithPast]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
labels in `[0, ..., config.vocab_size - 1]`.
Returns:
`MoECausalLMOutputWithPast` if `return_dict=True` (or if `config.use_return_dict=True`), otherwise a plain `tuple`.
Example:
Text Generation with regular LM Model
```python
>>> from transformers import AutoModel, AutoTokenizer, trainer_utils
>>> device = "cuda"
>>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
>>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> x_token = tokenizer("織田信長は、", return_tensors="pt")
>>> trainer_utils.set_seed(30)
>>> input_ids = x_token.input_ids.to(device)
>>> gen_token = model.generate(input_ids, max_new_tokens=50)
>>> tokenizer.decode(gen_token[0])
"織田信長は、政治・軍事の中枢まで掌握した政治家であり、日本史上類を見ない驚異的な軍事侵攻を続け..."
```
Text Generation with Prefix-LM Model
```python
>>> from transformers import AutoModel, AutoTokenizer, trainer_utils
>>> device = "cuda"
>>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
>>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> x_token = tokenizer("", prefix_text="織田信長は、", return_tensors="pt")
>>> trainer_utils.set_seed(30)
>>> input_ids = x_token.input_ids.to(device)
>>> token_type_ids = x_token.token_type_ids.to(device)
>>> gen_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50)
>>> tokenizer.decode(gen_token[0])
"織田信長は、政治・外交で数々の戦果を上げるが、1568年からは、いわゆる本能寺の変で細川晴元に暗殺される..."
```
Simultaneous Text Generation and Masked Language Modeling
```python
>>> from transformers import AutoModel, AutoTokenizer, trainer_utils
>>> device = "cuda"
>>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
>>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> masked_sentence = "武田信玄は、<|inputmask|>時代ファンならぜひ押さえ<|inputmask|>きたい名将の一人。"
>>> x_token = tokenizer("", prefix_text=masked_sentence, return_tensors="pt")
>>> trainer_utils.set_seed(30)
>>> input_ids = x_token.input_ids.to(device)
>>> token_type_ids = x_token.token_type_ids.to(device)
>>> out_lm_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50)
>>> out_mlm_token = model(input_ids, token_type_ids=token_type_ids).logits.argmax(axis=-1)
>>> tokenizer.decode(out_mlm_token[0])
"武田信玄は、戦国時代ファンならぜひ押さえておきたい名将の一人。"
>>> tokenizer.decode(out_lm_token[0][input_ids.shape[1] :])
"武田氏の三代に渡った武田家のひとり\n甲斐市に住む、日本史上最大の戦国大名。..."
```'''
pass
def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, attention_mask: torch.FloatTensor, token_type_ids: Optional[torch.FloatTensor]=None, spout: Optional[Union[list, torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, **kwargs):
pass
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
pass
def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding:
pass
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _unpack_router_logits(self, router_outputs):
pass
| 12 | 1 | 20 | 2 | 14 | 5 | 2 | 0.35 | 1 | 9 | 3 | 0 | 11 | 3 | 11 | 143 | 238 | 28 | 155 | 64 | 116 | 55 | 79 | 36 | 67 | 7 | 3 | 2 | 26
|
1,741
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.modeling_gptsan_japanese.GPTSanJapaneseLayerDenseFF
|
from .configuration_gptsan_japanese import GPTSanJapaneseConfig
import torch.nn as nn
class GPTSanJapaneseLayerDenseFF(nn.Module):
"""
Extra Transformers Feed Forward layer module.
Parameters:
config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
def __init__(self, config: GPTSanJapaneseConfig):
super().__init__()
self.mlp = GPTSanJapaneseDenseActDense(config, ext_layer=True)
self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
def forward(self, hidden_states):
"""
Args:
hidden_states (`torch.Tensor`) :
[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
Returns:
torch.Tensor[num_groups, tokens_per_group, hidden_dim]
"""
forwarded_states = self.mlp(hidden_states)
output = hidden_states + self.norm(forwarded_states)
return output
|
class GPTSanJapaneseLayerDenseFF(nn.Module):
'''
Extra Transformers Feed Forward layer module.
Parameters:
config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
def __init__(self, config: GPTSanJapaneseConfig):
pass
def forward(self, hidden_states):
'''
Args:
hidden_states (`torch.Tensor`) :
[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
Returns:
torch.Tensor[num_groups, tokens_per_group, hidden_dim]
'''
pass
| 3 | 2 | 9 | 1 | 4 | 4 | 1 | 1.67 | 1 | 3 | 2 | 0 | 2 | 2 | 2 | 12 | 28 | 4 | 9 | 7 | 6 | 15 | 9 | 7 | 6 | 1 | 1 | 0 | 2
|
1,742
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.modeling_gptsan_japanese.GPTSanJapaneseLayerSelfAttention
|
import torch
from ....cache_utils import Cache
from ....utils.deprecation import deprecate_kwarg
import torch.nn as nn
from typing import Optional, Union
class GPTSanJapaneseLayerSelfAttention(nn.Module):
"""
Self Attention and Normalization Unit
"""
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.self_attn = GPTSanJapaneseAttention(embed_dim=config.d_model, num_heads=config.num_heads, is_decoder=True, bias=has_relative_attention_bias)
self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]], past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]], ...]:
"""
Self-attention and normalize block.
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
decoding. If `past_key_values` are used, the user can optionally input only the last
`decoder_input_ids` (those that don't have their past key value states given to this model) of shape
`(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
Returns:
tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...]
"""
self_attn_past_key_value = past_key_values[:2] if past_key_values is not None else None
atten_out = self.self_attn(hidden_states=hidden_states, past_key_values=self_attn_past_key_value, attention_mask=(1 - attention_mask) * torch.finfo(hidden_states.dtype).min, layer_head_mask=head_mask, output_attentions=output_attentions)
if output_attentions:
attn_weights = (atten_out[1],)
else:
attn_weights = ()
attention_output = atten_out[0]
hidden = hidden_states + self.norm(attention_output)
if use_cache:
outputs = (hidden, atten_out[2])
else:
outputs = (hidden,)
return outputs + attn_weights
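# --- Illustrative sketch (not part of the original source) -----------------
# The forward above turns the 0/1 `attention_mask` into an additive mask with
# `(1 - attention_mask) * torch.finfo(dtype).min`, so masked positions get a
# hugely negative score and vanish after softmax. A tiny self-contained
# illustration of that conversion:
import torch

mask = torch.tensor([[1.0, 1.0, 0.0]])                       # 1 = attend, 0 = masked
additive = (1 - mask) * torch.finfo(torch.float32).min       # [[0, 0, -3.4e38]]
scores = torch.zeros(1, 3) + additive                        # pretend attention scores
print(torch.softmax(scores, dim=-1))                         # last position gets ~0 weight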
|
class GPTSanJapaneseLayerSelfAttention(nn.Module):
'''
Self Attention and Normalization Unit
'''
def __init__(self, config, has_relative_attention_bias=False):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]], past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]], ...]:
'''
Self-attention and normalize block.
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
decoding. If `past_key_values` are used, the user can optionally input only the last
`decoder_input_ids` (those that don't have their past key value states given to this model) of shape
`(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
Returns:
tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...]
'''
pass
| 4 | 2 | 39 | 5 | 19 | 17 | 3 | 0.97 | 1 | 4 | 1 | 0 | 2 | 2 | 2 | 12 | 84 | 11 | 38 | 19 | 27 | 37 | 17 | 11 | 14 | 4 | 1 | 1 | 5
|
1,743
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.modeling_gptsan_japanese.GPTSanJapaneseLayerSparseFF
|
import torch
import torch.nn as nn
from .configuration_gptsan_japanese import GPTSanJapaneseConfig
class GPTSanJapaneseLayerSparseFF(nn.Module):
"""
Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module.
Parameters:
config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
def __init__(self, config: GPTSanJapaneseConfig):
super().__init__()
self.mlp = GPTSanJapaneseSparseMLP(config)
self.soft_bypass_mlp = nn.Linear(config.d_model, config.d_model, bias=False)
self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
def forward(self, hidden_states, output_router_logits):
"""
Args:
hidden_states (`torch.Tensor`) :
[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
output_router_logits (`bool`) :
output experts router output.
Returns:
torch.Tensor[num_groups, tokens_per_group, hidden_dim]
"""
forwarded_states, router_tuple = self.mlp(hidden_states)
forwarded_states += torch.tanh(self.soft_bypass_mlp(hidden_states))
output = hidden_states + self.norm(forwarded_states)
if output_router_logits and router_tuple is not None:
return (output, router_tuple)
else:
return output
|
class GPTSanJapaneseLayerSparseFF(nn.Module):
'''
Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module.
Parameters:
config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
def __init__(self, config: GPTSanJapaneseConfig):
pass
def forward(self, hidden_states, output_router_logits):
'''
Args:
hidden_states (`torch.Tensor`) :
[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
output_router_logits (`bool`) :
output experts router output.
Returns:
torch.Tensor[num_groups, tokens_per_group, hidden_dim]
'''
pass
| 3 | 2 | 12 | 1 | 7 | 5 | 2 | 1.14 | 1 | 3 | 2 | 0 | 2 | 3 | 2 | 12 | 35 | 5 | 14 | 8 | 11 | 16 | 13 | 8 | 10 | 2 | 1 | 1 | 3
|
1,744
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.modeling_gptsan_japanese.GPTSanJapaneseModel
|
from .configuration_gptsan_japanese import GPTSanJapaneseConfig
import torch
from ....utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging
from typing import Optional, Union
from ....modeling_outputs import MoECausalLMOutputWithPast, MoEModelOutputWithPastAndCrossAttentions
from ....cache_utils import Cache
from ....activations import ACT2FN
import torch.nn as nn
@add_start_docstrings('The bare GPTSAN-japanese Model transformer outputting raw hidden-states without any specific head on top.', GPTSAN_JAPANESE_START_DOCSTRING)
class GPTSanJapaneseModel(GPTSanJapanesePreTrainedModel):
def __init__(self, config: GPTSanJapaneseConfig):
super().__init__(config)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
self.last_project = nn.Linear(config.d_model, config.d_model, bias=True)
self.act = ACT2FN['swish']
self.blocks = torch.nn.ModuleList([])
for _ in range(config.num_switch_layers):
self.blocks.append(GPTSanJapaneseBlock(config))
for _ in range(config.num_ext_layers):
self.blocks.append(GPTSanJapaneseBlock(config, ext_layer=True))
if config.num_ext_layers > 0:
self.extra_position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
if config.d_spout:
spouts = []
for _ in range(8):
spouts.append(nn.Linear(config.d_spout, config.d_spout, bias=False))
spouts.append(nn.Tanh())
spouts.append(nn.Linear(config.d_spout, config.num_layers * 2 * config.d_model, bias=False))
self.spout = nn.Sequential(*spouts)
self.post_init()
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
@add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.FloatTensor]=None, spout: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, output_router_logits: Optional[bool]=None, num_precontext: Optional[torch.LongTensor]=None) -> Union[MoEModelOutputWithPastAndCrossAttentions, tuple[torch.FloatTensor]]:
"""
num_precontext (`torch.LongTensor` of shape `(batch_size, 1)`):
Length of the `hybrid` (prefix) part of the input. Tokens up to this length attend to both preceding and
following tokens, like BERT; tokens after it attend only to preceding tokens, like GPT. See also:
https://github.com/tanreinama/GPTSAN/blob/main/report/model.md
Returns:
`MoEModelOutputWithPastAndCrossAttentions` if `return_dict=True` (or if `config.use_return_dict=True`),
otherwise a plain `tuple`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
device = self.position_embeddings.weight.device
if input_ids is None:
input_ids = torch.zeros([1, 1]).int().to(device)
if inputs_embeds is not None:
raise NotImplementedError('GPTSanJapaneseModel does not use `inputs_embeds`. Make sure to pass in `input_ids` instead.')
num_pasts_contexts = 0
num_batch = input_ids.shape[0]
pasts_or_spout_value = None
if past_key_values is not None:
num_pasts_contexts = past_key_values.get_seq_length()
elif self.config.d_spout and spout is not None:
num_pasts_contexts += 1
if self.config.d_spout and spout is not None and (attention_mask is not None):
attention_mask_with_spout = torch.ones(num_batch, attention_mask.shape[1] + 1, device=device)
attention_mask_with_spout[:, 1:] -= 1 - attention_mask
attention_mask = attention_mask_with_spout
if num_precontext is not None:
if not (len(num_precontext.shape) == 2 and num_precontext.shape[1] == 1):
raise ValueError('num_precontext should be [batch, 1] size.')
num_precontext = torch.reshape(num_precontext, [-1])
else:
num_precontext = torch.zeros([num_batch]).int().to(device)
num_input_contexts = input_ids.shape[1]
num_output_contexts = num_input_contexts + num_pasts_contexts
hidden_states = self.embed_tokens(input_ids)
if past_key_values is not None:
pasts_or_spout_value = past_key_values
elif self.config.d_spout and spout is not None:
pasts_or_spout_value = self.spout(spout)
pasts_or_spout_value = torch.reshape(pasts_or_spout_value, [num_batch, self.config.num_layers, 2, self.config.num_heads, num_pasts_contexts, self.config.d_model // self.config.num_heads])
pasts_or_spout_value = torch.split(pasts_or_spout_value, [1] * self.config.num_layers, dim=1)
pasts_or_spout_value = tuple((tuple((b.squeeze(1) for b in torch.split(a.squeeze(1), [1, 1], dim=1))) for a in pasts_or_spout_value))
else:
pasts_or_spout_value = [None] * self.config.num_layers
token_position = torch.arange(num_input_contexts).to(device) + num_pasts_contexts
if attention_mask is None:
attention_mask = torch.ones(num_batch, num_input_contexts, device=device)
gather_position = (torch.zeros((num_batch, self.config.d_model, num_input_contexts)).to(device) + token_position.unsqueeze(0)).transpose(1, 2).long()
gather_position -= (1 - attention_mask).argmin(dim=-1).unsqueeze(1).unsqueeze(2)
gather_position = torch.clip(gather_position, num_pasts_contexts, self.config.max_position_embeddings - 1)
for i in range(num_batch):
hidden_states[i] += torch.gather(self.position_embeddings.weight, dim=0, index=gather_position[i])
causal_mask = torch.tril(torch.ones((num_output_contexts, num_output_contexts), dtype=torch.uint8)).view(1, 1, num_output_contexts, num_output_contexts).to(device)
prefix_lm_mask = causal_mask[:, :, -num_input_contexts:, :]
if token_type_ids is not None:
token_type_ids = token_type_ids.unsqueeze(1).unsqueeze(2)
prefix_lm_mask = (prefix_lm_mask + token_type_ids > 0).float()
extended_attention_mask = prefix_lm_mask * attention_mask.unsqueeze(1).unsqueeze(2)
if head_mask is not None:
head_mask = self.get_head_mask(head_mask, self.config.num_switch_layers + self.config.num_ext_layers)
present_key_value_states = () if self.config.use_cache or use_cache else None
all_hidden_states = () if self.config.output_hidden_states or output_hidden_states else None
all_attentions = () if self.config.output_attentions or output_attentions else None
all_router_probs = () if self.config.output_router_logits or output_router_logits else None
for layer, past in enumerate(pasts_or_spout_value):
if layer == self.config.num_switch_layers:
if self.config.num_ext_layers > 0:
for i in range(num_batch):
hidden_states[i] += torch.gather(self.extra_position_embeddings.weight, dim=0, index=gather_position[i])
output_router_tuple = (self.config.output_router_logits or output_router_logits) and layer < self.config.num_switch_layers
block_output = self.blocks[layer](hidden_states=hidden_states, past_key_values=past, attention_mask=extended_attention_mask, head_mask=head_mask, use_cache=self.config.use_cache or use_cache, output_attentions=self.config.output_attentions or output_attentions, output_router_tuple=output_router_tuple)
outpos = 0
hidden_states = block_output[outpos]
if self.config.output_hidden_states or output_hidden_states:
all_hidden_states += (hidden_states,)
if self.config.use_cache or use_cache:
outpos += 1
present = block_output[outpos]
present_key_value_states += (present,)
if self.config.output_attentions or output_attentions:
outpos += 1
attention_probs = block_output[outpos]
all_attentions += (attention_probs,)
if output_router_tuple:
outpos += 1
router_tuple = block_output[outpos]
all_router_probs.append(router_tuple[0])
hidden_states = self.last_project(hidden_states)
hidden_states = self.act(hidden_states)
if self.config.output_hidden_states or output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_router_probs] if v is not None))
return MoEModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, router_probs=all_router_probs)
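# --- Illustrative sketch (not part of the original source) -----------------
# A small standalone illustration of the prefix-LM mask built in the forward
# above: the causal (lower-triangular) mask is widened so that positions with
# token_type_ids == 1 (the prefix) are visible to every position, while the
# remaining positions stay causal. The values below are hypothetical.
import torch

seq_len = 4
token_type_ids = torch.tensor([[1, 1, 0, 0]])      # first two tokens form the prefix
causal_mask = torch.tril(torch.ones(1, 1, seq_len, seq_len))
prefix_lm_mask = (causal_mask + token_type_ids.unsqueeze(1).unsqueeze(2) > 0).float()
print(prefix_lm_mask[0, 0])
# tensor([[1., 1., 0., 0.],
#         [1., 1., 0., 0.],
#         [1., 1., 1., 0.],
#         [1., 1., 1., 1.]])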
|
@add_start_docstrings('The bare GPTSAN-japanese Model transformer outputting raw hidden-states without any specific head on top.', GPTSAN_JAPANESE_START_DOCSTRING)
class GPTSanJapaneseModel(GPTSanJapanesePreTrainedModel):
def __init__(self, config: GPTSanJapaneseConfig):
pass
def set_input_embeddings(self, new_embeddings):
pass
@add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.FloatTensor]=None, spout: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, head_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, output_router_logits: Optional[bool]=None, num_precontext: Optional[torch.LongTensor]=None) -> Union[MoEModelOutputWithPastAndCrossAttentions, tuple[torch.FloatTensor]]:
'''
num_precontext (`torch.LongTensor` of shape `(batch_size, 1)`):
Length of the `hybrid` (prefix) part of the input. Tokens up to this length attend to both preceding and
following tokens, like BERT; tokens after it attend only to preceding tokens, like GPT. See also:
https://github.com/tanreinama/GPTSAN/blob/main/report/model.md
Returns:
`MoEModelOutputWithPastAndCrossAttentions` if `return_dict=True` (or if `config.use_return_dict=True`),
otherwise a plain `tuple`.
'''
pass
| 6 | 1 | 60 | 6 | 46 | 9 | 9 | 0.19 | 1 | 10 | 3 | 0 | 4 | 8 | 4 | 136 | 243 | 27 | 187 | 57 | 165 | 35 | 109 | 40 | 104 | 29 | 3 | 4 | 37
|
1,745
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.modeling_gptsan_japanese.GPTSanJapanesePreTrainedModel
|
from ....modeling_utils import PreTrainedModel
from .configuration_gptsan_japanese import GPTSanJapaneseConfig
import torch
from ....utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, is_torch_fx_proxy, logging
import torch.nn as nn
class GPTSanJapanesePreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: GPTSanJapaneseConfig
base_model_prefix = 'gptsan_japanese'
supports_gradient_checkpointing = False
_no_split_modules = ['GPTSanJapaneseBlock']
_skip_keys_device_placement = 'past_key_values'
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
return dummy_inputs
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, nn.LayerNorm):
module.weight.data.fill_(factor * 1.0)
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5))
if hasattr(module, 'bias') and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=factor * 1.0)
elif isinstance(module, GPTSanJapaneseModel):
module.embed_tokens.weight.data.normal_(mean=0.0, std=factor * 1.0)
module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0)
if hasattr(module, 'extra_position_embeddings') and module.extra_position_embeddings is not None:
module.extra_position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0)
elif isinstance(module, (GPTSanJapaneseModel, GPTSanJapaneseForConditionalGeneration)):
module.final_logits_bias.data.normal_(mean=0.0, std=factor * 1.0)
if hasattr(module, 'lm_head') and (not self.config.tie_word_embeddings):
module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
elif isinstance(module, GPTSanJapaneseDenseActDense):
module.wi.weight.data.normal_(mean=0.0, std=factor * self.config.d_model ** (-0.5))
if hasattr(module.wi, 'bias') and module.wi.bias is not None:
module.wi.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * self.config.d_ff ** (-0.5))
if hasattr(module.wo, 'bias') and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, GPTSanJapaneseAttention):
d_model = self.config.d_model
key_value_proj_dim = self.config.d_model
n_heads = self.config.num_heads
module.k_proj.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5))
module.v_proj.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5))
module.q_proj.weight.data.normal_(mean=0.0, std=factor * (d_model * key_value_proj_dim) ** (-0.5))
module.out_proj.weight.data.normal_(mean=0.0, std=factor * (n_heads * key_value_proj_dim) ** (-0.5))
elif isinstance(module, GPTSanJapaneseSparseMLP):
d_model = self.config.d_model
key_value_proj_dim = self.config.d_model
n_heads = self.config.num_heads
module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1)
for idx in range(self.config.num_experts):
module.experts[f'expert_{idx}'].wi.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5))
module.experts[f'expert_{idx}'].wo.weight.data.normal_(mean=0.0, std=factor * d_model ** (-0.5))
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
if decoder_start_token_id is None:
raise ValueError('self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information.')
if is_torch_fx_proxy(input_ids):
shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
else:
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError('self.model.config.pad_token_id has to be defined.')
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
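# --- Illustrative sketch (not part of the original source) -----------------
# `_shift_right` prepends `decoder_start_token_id`, drops the last label, and
# replaces any -100 with `pad_token_id`. A toy run with hypothetical values
# decoder_start_token_id=0 and pad_token_id=3:
import torch

labels = torch.tensor([[5, 6, 7, -100]])
shifted = labels.new_zeros(labels.shape)
shifted[..., 1:] = labels[..., :-1].clone()
shifted[..., 0] = 0                          # decoder_start_token_id
shifted.masked_fill_(shifted == -100, 3)     # pad_token_id
print(shifted)                               # tensor([[0, 5, 6, 7]])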
|
class GPTSanJapanesePreTrainedModel(PreTrainedModel):
'''
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
'''
@property
def dummy_inputs(self):
pass
def _init_weights(self, module):
'''Initialize the weights'''
pass
def _shift_right(self, input_ids):
pass
| 5 | 2 | 29 | 1 | 23 | 5 | 7 | 0.25 | 1 | 7 | 5 | 2 | 3 | 0 | 3 | 132 | 103 | 8 | 77 | 21 | 72 | 19 | 62 | 20 | 58 | 15 | 2 | 2 | 20
|
1,746
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.modeling_gptsan_japanese.GPTSanJapaneseSparseMLP
|
from .configuration_gptsan_japanese import GPTSanJapaneseConfig
import torch
import torch.nn as nn
class GPTSanJapaneseSparseMLP(nn.Module):
"""
Implementation of the Switch Transformers Sparse MLP module.
"""
def __init__(self, config: GPTSanJapaneseConfig, expert_class: nn.Module=GPTSanJapaneseDenseActDense):
super().__init__()
self.router = GPTSanJapaneseTop1Router(config)
self.experts = nn.ModuleDict()
for idx in range(config.num_experts):
self.experts[f'expert_{idx}'] = expert_class(config)
def forward(self, hidden_states):
"""
Hold on, this will be slightly tricky to understand. In the correct order, a MoE layer does the following:
1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_experts)`
and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the
hidden states: they are broadcast to the hidden state values (they can be interpreted as a scaling factor).
2- Dispatches the tokens to their associated experts. We do a classic for loop over the experts and assign to each
expert the corresponding hidden states.
"""
router_mask, router_probs, router_logits = self.router(hidden_states)
expert_index = torch.argmax(router_mask, dim=-1)
next_states = hidden_states.clone()
for idx, expert in enumerate(self.experts.values()):
token_indices = router_mask[:, :, idx].bool()
next_states[token_indices] = expert(hidden_states[token_indices]).to(next_states.dtype)
hidden_states = router_probs * next_states
return (hidden_states, (router_logits, expert_index))
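# --- Illustrative sketch (not part of the original source) -----------------
# A toy dispatch matching the loop above: each token is sent to exactly one
# expert according to the router mask, and the expert output is scaled by the
# top-1 router probability. The two "experts" here are hypothetical stand-ins.
import torch

hidden_states = torch.tensor([[[1.0, 1.0], [2.0, 2.0]]])     # [batch=1, seq=2, dim=2]
router_mask = torch.tensor([[[1, 0], [0, 1]]]).bool()        # token 0 -> expert 0, token 1 -> expert 1
router_probs = torch.tensor([[[0.9], [0.6]]])                # top-1 probability per token
experts = [lambda x: x * 2, lambda x: x * 3]                 # stand-in experts

next_states = hidden_states.clone()
for idx, expert in enumerate(experts):
    token_indices = router_mask[:, :, idx]
    next_states[token_indices] = expert(hidden_states[token_indices])
print(router_probs * next_states)                            # [[[1.8, 1.8], [3.6, 3.6]]]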
|
class GPTSanJapaneseSparseMLP(nn.Module):
'''
Implementation of the Switch Transformers Sparse MLP module.
'''
def __init__(self, config: GPTSanJapaneseConfig, expert_class: nn.Module=GPTSanJapaneseDenseActDense):
pass
def forward(self, hidden_states):
'''
Hold on, this will be slightly tricky to understand. In the correct order, a MoE layer does the following:
1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_experts)`
and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the
hidden states: they are broadcast to the hidden state values (they can be interpreted as a scaling factor).
2- Dispatches the tokens to their associated experts. We do a classic for loop over the experts and assign to each
expert the corresponding hidden states.
'''
pass
| 3 | 2 | 18 | 4 | 8 | 7 | 2 | 1 | 1 | 6 | 3 | 0 | 2 | 2 | 2 | 12 | 41 | 9 | 16 | 11 | 13 | 16 | 16 | 11 | 13 | 2 | 1 | 1 | 4
|
1,747
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/modeling_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.modeling_gptsan_japanese.GPTSanJapaneseTop1Router
|
import torch
import torch.nn as nn
from .configuration_gptsan_japanese import GPTSanJapaneseConfig
class GPTSanJapaneseTop1Router(nn.Module):
"""
Router using tokens choose top-1 experts assignment.
This router uses the same mechanism as in Switch Transformer (https://huggingface.co/papers/2101.03961) and V-MoE
(https://huggingface.co/papers/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then
routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each
token is processed by an expert**, or that each expert receives at least one token.
"""
def __init__(self, config: GPTSanJapaneseConfig):
super().__init__()
self.num_experts = config.num_experts
self.expert_capacity = config.expert_capacity
self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias)
self.jitter_noise = config.router_jitter_noise
self.ignore_padding_tokens = config.router_ignore_padding_tokens
self.dtype = getattr(torch, config.router_dtype)
def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
"""
Computes router probabilities from input hidden states.
Args:
hidden_states (`torch.Tensor`):
(batch_size, sequence_length, hidden_dim) from which router probabilities are computed.
Returns:
router_probabilities (`torch.Tensor`):
Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each
token and expert. Used for routing tokens to experts.
router_logits (`torch.Tensor`):
Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.
This is used later for computing router z-loss.
"""
self.input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(self.dtype)
if self.training and self.jitter_noise > 0:
hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
self._cast_classifier()
router_logits = self.classifier(hidden_states)
router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype)
return (router_probabilities, router_logits)
def _cast_classifier(self):
"""
`bitsandbytes` `Linear8bitLt` layers do not support manual casting. Therefore, we need to check whether the
classifier is an instance of the `Linear8bitLt` class by checking its special attributes.
"""
if not (hasattr(self.classifier, 'SCB') or hasattr(self.classifier, 'CB')):
self.classifier = self.classifier.to(self.dtype)
def forward(self, hidden_states: torch.Tensor) -> tuple:
"""
Generic forward function for every Router class. Each Router expects the same input hidden states
(`hidden_states`), corresponding to the hidden states of each token, and an `expert_capacity`, corresponding to
the maximum number of tokens the Router will send to each expert; some Routers can send at most a few tokens to
each expert. Each Router works as follows: it takes the hidden states of each token and computes the
`router_probs` and `router_logits` from the router weights. This assigns, for each token, the raw probability of
being routed to an expert. Each Router class then has to define its own `_compute_routing_instructions`.
Args:
hidden_states (`torch.Tensor`) :
[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
Returns:
tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs
and the router logits. The router probabilities and logits are required to compute the loss.
"""
router_probs, router_logits = self._compute_router_probabilities(hidden_states)
expert_index = torch.argmax(router_probs, dim=-1)
expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts)
token_priority = torch.cumsum(expert_index, dim=-2)
expert_capacity_mask = token_priority <= self.expert_capacity
expert_index = expert_index * expert_capacity_mask
router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1)
return (expert_index, router_probs, router_logits)
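# --- Illustrative sketch (not part of the original source) -----------------
# A toy run of the capacity masking performed above: a token keeps its chosen
# expert only while the running per-expert count (cumsum over the sequence)
# stays within `expert_capacity`. With 2 experts and expert_capacity=1, the
# second token that picks expert 0 is dropped. Values are hypothetical.
import torch

expert_index = torch.nn.functional.one_hot(torch.tensor([[0, 0, 1]]), num_classes=2)
token_priority = torch.cumsum(expert_index, dim=-2)      # running count per expert
expert_capacity_mask = token_priority <= 1               # expert_capacity = 1
print(expert_index * expert_capacity_mask)
# tensor([[[1, 0],
#          [0, 0],
#          [0, 1]]])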
|
class GPTSanJapaneseTop1Router(nn.Module):
'''
Router using tokens choose top-1 experts assignment.
This router uses the same mechanism as in Switch Transformer (https://huggingface.co/papers/2101.03961) and V-MoE
(https://huggingface.co/papers/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then
routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each
token is processed by an expert**, or that each expert receives at least one token.
'''
def __init__(self, config: GPTSanJapaneseConfig):
pass
def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
'''
Computes router probabilities from input hidden states.
Args:
hidden_states (`torch.Tensor`):
(batch_size, sequence_length, hidden_dim) from which router probabilities are computed.
Returns:
router_probabilities (`torch.Tensor`):
Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each
token and expert. Used for routing tokens to experts.
router_logits (`torch.Tensor`):
Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.
This is used later for computing router z-loss.
'''
pass
def _cast_classifier(self):
'''
`bitsandbytes` `Linear8bitLt` layers do not support manual casting. Therefore, we need to check whether the
classifier is an instance of the `Linear8bitLt` class by checking its special attributes.
'''
pass
def forward(self, hidden_states: torch.Tensor) -> tuple:
'''
Generic forward function for every Router class. Each Router expects the same input hidden states
(`hidden_states`), corresponding to the hidden states of each token, and an `expert_capacity`, corresponding to
the maximum number of tokens the Router will send to each expert; some Routers can send at most a few tokens to
each expert. Each Router works as follows: it takes the hidden states of each token and computes the
`router_probs` and `router_logits` from the router weights. This assigns, for each token, the raw probability of
being routed to an expert. Each Router class then has to define its own `_compute_routing_instructions`.
Args:
hidden_states (`torch.Tensor`) :
[num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
Returns:
tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs
and the router logits. The router probabilities and logits are required to compute the loss.
'''
pass
| 5 | 4 | 19 | 2 | 7 | 10 | 2 | 1.53 | 1 | 3 | 1 | 0 | 4 | 7 | 4 | 14 | 91 | 15 | 30 | 18 | 25 | 46 | 30 | 18 | 25 | 2 | 1 | 1 | 6
|
1,748
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.tokenization_gptsan_japanese.GPTSanJapaneseTokenizer
|
import os
from ....tokenization_utils import PreTrainedTokenizer
from ....utils import PaddingStrategy, logging
from ....tokenization_utils_base import BatchEncoding, PreTokenizedInput, PreTokenizedInputPair, TextInput, TextInputPair, TruncationStrategy
from typing import Optional, Union
import json
class GPTSanJapaneseTokenizer(PreTrainedTokenizer):
"""
This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications
- Decoding byte0~byte255 tokens correctly
- Added bagofword token handling
- Return token_type_ids for Prefix-LM model
The bagofword token represents a repetition of the previous token and is converted to 3 consecutive tokens when
decoding. In addition, the original Japanese special Sub-Word-Encoding has been released in this repository
(https://github.com/tanreinama/Japanese-BPEEncoder_V2). The token_type_ids is a mask indicating the prefix input
position of the Prefix-LM model. To specify a prefix position, specify a prefix input for prefix_text, or specify a
sentence of the prefix part and the part after it as a text pair of batch input.
Example:
```python
>>> from transformers import GPTSanJapaneseTokenizer
>>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> # You can confirm both 慶応 and 慶應 are encoded to 17750
>>> tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"]
[35993, 35998, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281]
>>> # Both 慶応 and 慶應 are decoded to 慶応
>>> tokenizer.decode(tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"])
'吾輩は猫である🐯。実は慶応(慶応)大学出身'
```
Example for Prefix-LM:
```python
>>> from transformers import GPTSanJapaneseTokenizer
>>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["input_ids"]
[35993, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 35998, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281]
>>> # Mask for Prefix-LM inputs
>>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["token_type_ids"]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
```
Example for batch encode:
```python
>>> from transformers import GPTSanJapaneseTokenizer
>>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["input_ids"]
[[35993, 35998, 8640, 25948, 35993, 35998, 30647, 35675, 35999, 35999], [35993, 35998, 10382, 9868, 35993, 35998, 30646, 9459, 30646, 35675]]
>>> # Mask for Prefix-LM inputs
>>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["token_type_ids"]
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
>>> # Mask for padding
>>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["attention_mask"]
[[1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
```
Args:
vocab_file (`str`):
File containing the vocabulary.
emoji_file (`str`):
File containing the emoji.
unk_token (`str`, *optional*, defaults to `"<|nottoken|>"`):
The token used for unknown characters
pad_token (`str`, *optional*, defaults to `"<|separator|>"`):
The token used for padding
bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"<|segmenter|>"`):
A special token that separates the prefix part from the general input part.
do_clean_text (`bool`, *optional*, defaults to `False`):
Whether or not to clean text for URL, EMAIL, TEL, Japanese DATE and Japanese PRICE.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask', 'token_type_ids']
def __init__(self, vocab_file, emoji_file, unk_token='<|nottoken|>', pad_token='<|separator|>', bos_token='<|startoftext|>', eos_token='<|endoftext|>', sep_token='<|segmenter|>', do_clean_text=False, **kwargs):
if not os.path.isfile(vocab_file):
raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(emoji_file):
raise ValueError(f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
self.do_clean_text = do_clean_text
self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
self.subword_tokenizer = SubWordJapaneseTokenizer(vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
super().__init__(unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, do_clean_text=do_clean_text, **kwargs)
@property
def vocab_size(self):
return len(self.raw_vocab)
def get_vocab(self):
return dict(self.raw_vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.subword_tokenizer.convert_id_to_token(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
words = []
byte_tokens = []
for word in tokens:
if word[:6] == '<|byte' and word[-2:] == '|>':
byte_tokens.append(int(word[6:-2]))
else:
if len(byte_tokens) > 0:
words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
byte_tokens = []
if word[:7] == '<|emoji' and word[-2:] == '|>':
words.append(self.emoji['emoji_inv'][word])
elif word == '<SP>':
words.append(' ')
elif word == '<BR>':
words.append('\n')
elif word == '<TAB>':
words.append('\t')
elif word == '<BLOCK>':
words.append('▀')
elif word == '<KIGOU>':
words.append('ǀ')
elif word == '<U2000U2BFF>':
words.append('‖')
elif word == '<|bagoftoken|>':
if len(words) > 0:
words.append(words[-1])
words.append(words[-1])
words.append(words[-1])
elif word.startswith('<|') and word.endswith('|>'):
words.append('')
else:
words.append(word)
if len(byte_tokens) > 0:
words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
text = ''.join(words)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
emoji_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'])
else:
vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
emoji_file = (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
with open(vocab_file, 'w', encoding='utf-8') as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
index = token_index
writer.write(','.join(token) + '\n')
index += 1
with open(emoji_file, 'w', encoding='utf-8') as writer:
json.dump(self.emoji, writer)
return (vocab_file, emoji_file)
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
The tokenizer returns token_type_ids as separators between the Prefix part and the rest.
token_type_ids is 1 for the Prefix part and 0 for the rest of the tokens.
Example:
```python
>>> from transformers import GPTSanJapaneseTokenizer
>>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> x_token = tokenizer("アイウエ")
>>> # input_ids: | SOT | SEG | ア | イ | ウ | エ |
>>> # token_type_ids: | 1 | 0 | 0 | 0 | 0 | 0 |
>>> x_token = tokenizer("", prefix_text="アイウエ")
>>> # input_ids: | SOT | ア | イ | ウ | エ | SEG |
>>> # token_type_ids: | 1 | 1 | 1 | 1 | 1 | 0 |
>>> x_token = tokenizer("ウエ", prefix_text="アイ")
>>> # input_ids: | SOT | ア | イ | SEG | ウ | エ |
>>> # token_type_ids: | 1 | 1 | 1 | 0 | 0 | 0 |
```"""
prefix_len = 0
if self.sep_token in self.vocab:
segid = self.vocab[self.sep_token]
if segid in token_ids_0:
prefix_len = token_ids_0.index(segid)
if token_ids_1 is None:
total_len = len(token_ids_0)
else:
total_len = len(token_ids_0 + token_ids_1)
return prefix_len * [1] + (total_len - prefix_len) * [0]
def prepare_for_tokenization(self, text, prefix_text=None, add_sep_token=None, **kwargs):
if add_sep_token is None:
add_sep_token = self.sep_token not in text
prepared = self.bos_token if self.bos_token in self.vocab else ''
prepared += prefix_text if prefix_text is not None else ''
if add_sep_token:
prepared += self.sep_token if self.sep_token in self.vocab else ''
prepared += text
return (prepared, kwargs)
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput], list[PreTokenizedInputPair]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
if isinstance(batch_text_or_text_pairs[0], tuple) or isinstance(tuple(batch_text_or_text_pairs[0]), list):
batch_prefix_texts = []
for pref, txt in batch_text_or_text_pairs:
batch_prefix_texts.append(pref + self.sep_token + txt)
batch_text_or_text_pairs = batch_prefix_texts
return super()._batch_encode_plus(batch_text_or_text_pairs, add_special_tokens, padding_strategy, truncation_strategy, max_length, stride, is_split_into_words, pad_to_multiple_of, return_tensors, return_token_type_ids, return_attention_mask, return_overflowing_tokens, return_special_tokens_mask, return_offsets_mapping, return_length, verbose, **kwargs)
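# --- Illustrative sketch (not part of the original source) -----------------
# `convert_tokens_to_string` above buffers consecutive `<|byteNNN|>` tokens
# and decodes the accumulated bytes as UTF-8 in one go, which is what makes
# byte-fallback tokens round-trip correctly. A minimal standalone sketch of
# that buffering logic (the token strings are hypothetical examples):
byte_tokens = []
words = []
for word in ["<|byte227|>", "<|byte129|>", "<|byte130|>", "<SP>"]:
    if word[:6] == "<|byte" and word[-2:] == "|>":
        byte_tokens.append(int(word[6:-2]))
    else:
        if byte_tokens:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
            byte_tokens = []
        words.append(" " if word == "<SP>" else word)
print("".join(words))   # "あ " (bytes 227, 129, 130 are the UTF-8 encoding of "あ")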
|
class GPTSanJapaneseTokenizer(PreTrainedTokenizer):
'''
This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications
- Decoding byte0~byte255 tokens correctly
- Added bagofword token handling
- Return token_type_ids for Prefix-LM model
The bagofword token represents a repetition of the previous token and is converted to 3 consecutive tokens when
decoding. In addition, the original Japanese special Sub-Word-Encoding has been released in this repository
(https://github.com/tanreinama/Japanese-BPEEncoder_V2). The token_type_ids is a mask indicating the prefix input
position of the Prefix-LM model. To specify a prefix position, specify a prefix input for prefix_text, or specify a
sentence of the prefix part and the part after it as a text pair of batch input.
Example:
```python
>>> from transformers import GPTSanJapaneseTokenizer
>>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> # You can confirm both 慶応 and 慶應 are encoded to 17750
>>> tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"]
[35993, 35998, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281]
>>> # Both 慶応 and 慶應 are decoded to 慶応
>>> tokenizer.decode(tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"])
'吾輩は猫である🐯。実は慶応(慶応)大学出身'
```
Example for Prefix-LM:
```python
>>> from transformers import GPTSanJapaneseTokenizer
>>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["input_ids"]
[35993, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 35998, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281]
>>> # Mask for Prefix-LM inputs
>>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["token_type_ids"]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
```
Example for batch encode:
```python
>>> from transformers import GPTSanJapaneseTokenizer
>>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["input_ids"]
[[35993, 35998, 8640, 25948, 35993, 35998, 30647, 35675, 35999, 35999], [35993, 35998, 10382, 9868, 35993, 35998, 30646, 9459, 30646, 35675]]
>>> # Mask for Prefix-LM inputs
>>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["token_type_ids"]
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
>>> # Mask for padding
>>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["attention_mask"]
[[1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
```
Args:
vocab_file (`str`):
File containing the vocabulary.
emoji_file (`str`):
File containing the emoji.
unk_token (`str`, *optional*, defaults to `"<|nottoken|>"`):
The token used for unknown characters
pad_token (`str`, *optional*, defaults to `"<|separator|>"`):
The token used for padding
bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
sep_token (`str`, *optional*, defaults to `"<|segmenter|>"`):
A special token used to separate the prefix part from the general input part.
do_clean_text (`bool`, *optional*, defaults to `False`):
Whether or not to clean text for URL, EMAIL, TEL, Japanese DATE and Japanese PRICE.
'''
def __init__(self, vocab_file, emoji_file, unk_token='<|nottoken|>', pad_token='<|separator|>', bos_token='<|startoftext|>', eos_token='<|endoftext|>', sep_token='<|segmenter|>', do_clean_text=False, **kwargs):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def _tokenize(self, text):
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) in an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) in a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) in a single string.'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
The tokenizer returns token_type_ids as separators between the Prefix part and the rest.
token_type_ids is 1 for the Prefix part and 0 for the rest of the token.
Example:
```python
>>> from transformers import GPTSanJapaneseTokenizer
>>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> x_token = tokenizer("アイウエ")
>>> # input_ids: | SOT | SEG | ア | イ | ウ | エ |
>>> # token_type_ids: | 1 | 0 | 0 | 0 | 0 | 0 |
>>> x_token = tokenizer("", prefix_text="アイウエ")
>>> # input_ids: | SOT | ア | イ | ウ | エ | SEG |
>>> # token_type_ids: | 1 | 1 | 1 | 1 | 1 | 0 |
>>> x_token = tokenizer("ウエ", prefix_text="アイ")
>>> # input_ids: | SOT | ア | イ | SEG | ウ | エ |
>>> # token_type_ids: | 1 | 1 | 1 | 0 | 0 | 0 |
```'''
pass
def prepare_for_tokenization(self, text, prefix_text=None, add_sep_token=None, **kwargs):
pass
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[list[TextInput], list[TextInputPair], list[PreTokenizedInput], list[PreTokenizedInputPair]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
pass
| 13
| 5
| 19
| 1
| 16
| 2
| 4
| 0.48
| 1
| 12
| 3
| 0
| 11
| 8
| 11
| 100
| 304
| 32
| 184
| 68
| 137
| 89
| 92
| 31
| 80
| 15
| 3
| 4
| 44
|
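The `_batch_encode_plus` override shown above folds each `(prefix, text)` pair into one string joined by the separator token before delegating to the parent implementation. Below is a minimal standalone sketch of that folding step; the `<|segmenter|>` value is the `sep_token` default from the class signature above, and the helper name is made up for illustration:

```python
# Sketch of the (prefix, text) -> single-string folding done in _batch_encode_plus.
SEP = "<|segmenter|>"  # sep_token default from the class signature above

def fold_prefix_pairs(batch):
    """Join each (prefix, text) pair with the separator; leave plain strings alone."""
    if isinstance(batch[0], (tuple, list)):
        return [pref + SEP + txt for pref, txt in batch]
    return batch

print(fold_prefix_pairs([("武田信玄", "は、"), ("織田信長", "の配下の、")]))
# ['武田信玄<|segmenter|>は、', '織田信長<|segmenter|>の配下の、']
print(fold_prefix_pairs(["素の入力"]))  # returned unchanged
```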
1,749
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
|
transformers.models.deprecated.gptsan_japanese.tokenization_gptsan_japanese.SubWordJapaneseTokenizer
|
import sys
import re
import numpy as np
class SubWordJapaneseTokenizer:
"""
This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications
- Decoding byte0~byte255 tokens correctly
- Added bagofword token handling
https://github.com/tanreinama/Japanese-BPEEncoder_V2 This tokenizer class is under MIT License according to the
original repository.
MIT License
Copyright (c) 2020 tanreinama
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def __init__(self, vocab, ids_to_tokens, emoji):
self.vocab = vocab
self.ids_to_tokens = ids_to_tokens
self.emoji = emoji
self.maxlen = np.max([len(w) for w in self.vocab])
self.content_repatter1 = re.compile("(https?|ftp)(:\\/\\/[-_\\.!~*\\'()a-zA-Z0-9;\\/?:\\@&=\\+$,%#]+)")
self.content_repatter2 = re.compile('[A-Za-z0-9\\._+]*@[\\-_0-9A-Za-z]+(\\.[A-Za-z]+)*')
self.content_repatter3 = re.compile('[\\(]{0,1}[0-9]{2,4}[\\)\\-\\(]{0,1}[0-9]{2,4}[\\)\\-]{0,1}[0-9]{3,4}')
self.content_repatter4 = re.compile('([12]\\d{3}[/\\-年])*(0?[1-9]|1[0-2])[/\\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\\d{1,2}|:|\\d{1,2}時|\\d{1,2}分|\\(日\\)|\\(月\\)|\\(火\\)|\\(水\\)|\\(木\\)|\\(金\\)|\\(土\\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
self.content_repatter5 = re.compile('(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\\u32ff)\\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\\d{1,2}|:|\\d{1,2}時|\\d{1,2}分|\\(日\\)|\\(月\\)|\\(火\\)|\\(水\\)|\\(木\\)|\\(金\\)|\\(土\\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
if sys.version_info >= (3, 11):
self.content_repatter6 = re.compile('(?:\\d,\\d{3}|[\\d億])*+(?:\\d,\\d{3}|[\\d万])*+(?:\\d,\\d{3}|[\\d千])*+(?:千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(?:\\(税込\\)|\\(税抜\\)|\\+tax)*')
else:
self.content_repatter6 = re.compile('(?:\\d,\\d{3}|[\\d億万千])*(?:千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(?:\\(税込\\)|\\(税抜\\)|\\+tax)*')
keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
self.content_trans1 = str.maketrans(dict.fromkeys(keisen + blocks, '<BLOCK>'))
def __len__(self):
return len(self.ids_to_tokens)
def clean_text(self, content):
content = self.content_repatter1.sub('<URL>', content)
content = self.content_repatter2.sub('<EMAIL>', content)
content = self.content_repatter3.sub('<TEL>', content)
content = self.content_repatter4.sub('<DATE>', content)
content = self.content_repatter5.sub('<DATE>', content)
content = self.content_repatter6.sub('<PRICE>', content)
content = content.translate(self.content_trans1)
while '<BLOCK><BLOCK>' in content:
content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
return content
def tokenize(self, text, clean=False):
text = text.replace(' ', '<SP>')
text = text.replace('\u3000', '<SP>')
text = text.replace('\r\n', '<BR>')
text = text.replace('\n', '<BR>')
text = text.replace('\r', '<BR>')
text = text.replace('\t', '<TAB>')
text = text.replace('—', 'ー')
text = text.replace('−', 'ー')
for k, v in self.emoji['emoji'].items():
if k in text:
text = text.replace(k, v)
if clean:
text = self.clean_text(text)
def check_simbol(x):
e = x.encode()
if len(x) == 1 and len(e) == 2:
c = (int(e[0]) << 8) + int(e[1])
if c >= 49825 and c <= 49855 or (c >= 51072 and c <= 51075) or (c >= 51897 and c <= 52159) or (c >= 52352 and c <= 52642):
return True
return False
def checku2e(x):
e = x.encode()
if len(x) == 1 and len(e) == 3:
c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 14844032 and c <= 14856319:
return True
return False
pos = 0
result = []
while pos < len(text):
end = min(len(text), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
candidates = []
for e in range(end, pos, -1):
wd = text[pos:e]
if wd in self.vocab:
if wd[0] == '<' and len(wd) > 2:
candidates = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(candidates) > 0:
_, wd, e = sorted(candidates, key=lambda x: x[0])[0]
result.append(wd)
pos = e
else:
end = pos + 1
wd = text[pos:end]
if check_simbol(wd):
result.append('<KIGOU>')
elif checku2e(wd):
result.append('<U2000U2BFF>')
else:
for i in wd.encode('utf-8'):
result.append('<|byte%d|>' % i)
pos = end
return result
def convert_id_to_token(self, index):
return self.ids_to_tokens[index][0]
|
class SubWordJapaneseTokenizer:
'''
This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications
- Decoding byte0~byte255 tokens correctly
- Added bagofword token handling
https://github.com/tanreinama/Japanese-BPEEncoder_V2 This tokenizer class is under MIT License according to the
original repository.
MIT License
Copyright (c) 2020 tanreinama
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def __init__(self, vocab, ids_to_tokens, emoji):
pass
def __len__(self):
pass
def clean_text(self, content):
pass
def tokenize(self, text, clean=False):
pass
def check_simbol(x):
pass
def checku2e(x):
pass
def convert_id_to_token(self, index):
pass
| 8
| 1
| 17
| 0
| 17
| 1
| 3
| 0.26
| 0
| 3
| 0
| 0
| 5
| 11
| 5
| 5
| 133
| 14
| 98
| 34
| 90
| 25
| 83
| 34
| 75
| 13
| 0
| 4
| 24
|
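The heart of `SubWordJapaneseTokenizer.tokenize` above is a greedy longest-match scan over the vocabulary with a UTF-8 byte fallback for unknown characters. The following toy illustration of that loop uses a made-up four-entry vocabulary; it is a simplified sketch, not the real method, which additionally handles `<...>` special tokens, symbol ranges and emoji:

```python
# Toy illustration of the greedy longest-match loop with byte fallback.
vocab = {"吾輩": 10, "吾": 11, "は": 12, "猫": 13}
maxlen = max(len(w) for w in vocab)

def toy_tokenize(text):
    pos, result = 0, []
    while pos < len(text):
        # Collect all vocabulary matches starting at pos, longest candidates first.
        candidates = []
        for end in range(min(len(text), pos + maxlen), pos, -1):
            wd = text[pos:end]
            if wd in vocab:
                candidates.append((vocab[wd], wd, end))
        if candidates:
            _, wd, end = sorted(candidates)[0]  # smallest vocabulary id wins
            result.append(wd)
            pos = end
        else:
            # Byte fallback: emit one <|byteN|> token per UTF-8 byte of the character.
            for b in text[pos].encode("utf-8"):
                result.append("<|byte%d|>" % b)
            pos += 1
    return result

print(toy_tokenize("吾輩は犬"))  # ['吾輩', 'は', '<|byte231|>', '<|byte138|>', '<|byte172|>']
```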
1,750
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/graphormer/collating_graphormer.py
|
transformers.models.deprecated.graphormer.collating_graphormer.GraphormerDataCollator
|
import numpy as np
from typing import Any
import torch
from ....utils import is_cython_available, requires_backends
from collections.abc import Mapping
class GraphormerDataCollator:
def __init__(self, spatial_pos_max=20, on_the_fly_processing=False):
if not is_cython_available():
raise ImportError('Graphormer preprocessing needs Cython (pyximport)')
self.spatial_pos_max = spatial_pos_max
self.on_the_fly_processing = on_the_fly_processing
def __call__(self, features: list[dict]) -> dict[str, Any]:
if self.on_the_fly_processing:
features = [preprocess_item(i) for i in features]
if not isinstance(features[0], Mapping):
features = [vars(f) for f in features]
batch = {}
max_node_num = max((len(i['input_nodes']) for i in features))
node_feat_size = len(features[0]['input_nodes'][0])
edge_feat_size = len(features[0]['attn_edge_type'][0][0])
max_dist = max((len(i['input_edges'][0][0]) for i in features))
edge_input_size = len(features[0]['input_edges'][0][0][0])
batch_size = len(features)
batch['attn_bias'] = torch.zeros(batch_size, max_node_num + 1, max_node_num + 1, dtype=torch.float)
batch['attn_edge_type'] = torch.zeros(batch_size, max_node_num, max_node_num, edge_feat_size, dtype=torch.long)
batch['spatial_pos'] = torch.zeros(batch_size, max_node_num, max_node_num, dtype=torch.long)
batch['in_degree'] = torch.zeros(batch_size, max_node_num, dtype=torch.long)
batch['input_nodes'] = torch.zeros(batch_size, max_node_num, node_feat_size, dtype=torch.long)
batch['input_edges'] = torch.zeros(batch_size, max_node_num, max_node_num, max_dist, edge_input_size, dtype=torch.long)
for ix, f in enumerate(features):
for k in ['attn_bias', 'attn_edge_type', 'spatial_pos', 'in_degree', 'input_nodes', 'input_edges']:
f[k] = torch.tensor(f[k])
if len(f['attn_bias'][1:, 1:][f['spatial_pos'] >= self.spatial_pos_max]) > 0:
f['attn_bias'][1:, 1:][f['spatial_pos'] >= self.spatial_pos_max] = float('-inf')
batch['attn_bias'][ix, :f['attn_bias'].shape[0], :f['attn_bias'].shape[1]] = f['attn_bias']
batch['attn_edge_type'][ix, :f['attn_edge_type'].shape[0], :f['attn_edge_type'].shape[1], :] = f['attn_edge_type']
batch['spatial_pos'][ix, :f['spatial_pos'].shape[0], :f['spatial_pos'].shape[1]] = f['spatial_pos']
batch['in_degree'][ix, :f['in_degree'].shape[0]] = f['in_degree']
batch['input_nodes'][ix, :f['input_nodes'].shape[0], :] = f['input_nodes']
batch['input_edges'][ix, :f['input_edges'].shape[0], :f['input_edges'].shape[1], :f['input_edges'].shape[2], :] = f['input_edges']
batch['out_degree'] = batch['in_degree']
sample = features[0]['labels']
if len(sample) == 1:
if isinstance(sample[0], float):
batch['labels'] = torch.from_numpy(np.concatenate([i['labels'] for i in features]))
else:
batch['labels'] = torch.from_numpy(np.concatenate([i['labels'] for i in features]))
else:
batch['labels'] = torch.from_numpy(np.stack([i['labels'] for i in features], axis=0))
return batch
|
class GraphormerDataCollator:
def __init__(self, spatial_pos_max=20, on_the_fly_processing=False):
pass
def __call__(self, features: list[dict]) -> dict[str, Any]:
pass
| 3
| 0
| 30
| 5
| 25
| 2
| 5
| 0.08
| 0
| 6
| 0
| 0
| 2
| 2
| 2
| 2
| 62
| 11
| 51
| 15
| 48
| 4
| 43
| 15
| 40
| 8
| 0
| 2
| 10
|
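`GraphormerDataCollator.__call__` above pads variable-sized graphs into dense batch tensors by allocating zero tensors at the maximum node count and copying each graph into its slice. A minimal sketch of that padding step for a single field, with toy shapes chosen only for illustration:

```python
import torch

# Two graphs with different node counts are copied into one zero-initialised batch tensor.
features = [
    {"input_nodes": torch.tensor([[1, 2], [3, 4], [5, 6]])},  # graph with 3 nodes
    {"input_nodes": torch.tensor([[7, 8]])},                  # graph with 1 node
]
max_node_num = max(f["input_nodes"].shape[0] for f in features)
node_feat_size = features[0]["input_nodes"].shape[1]

batch = torch.zeros(len(features), max_node_num, node_feat_size, dtype=torch.long)
for ix, f in enumerate(features):
    n = f["input_nodes"].shape[0]
    batch[ix, :n] = f["input_nodes"]

print(batch.shape)  # torch.Size([2, 3, 2]); the second graph is zero-padded
```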
1,751
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/graphormer/configuration_graphormer.py
|
transformers.models.deprecated.graphormer.configuration_graphormer.GraphormerConfig
|
from ....configuration_utils import PretrainedConfig
from typing import Optional
class GraphormerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`~GraphormerModel`]. It is used to instantiate a
Graphormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Graphormer
[graphormer-base-pcqm4mv1](https://huggingface.co/graphormer-base-pcqm4mv1) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_classes (`int`, *optional*, defaults to 1):
Number of target classes or labels, set to n for binary classification of n tasks.
num_atoms (`int`, *optional*, defaults to 512*9):
Number of node types in the graphs.
num_edges (`int`, *optional*, defaults to 512*3):
Number of edge types in the graph.
num_in_degree (`int`, *optional*, defaults to 512):
Number of in-degree types in the input graphs.
num_out_degree (`int`, *optional*, defaults to 512):
Number of out-degree types in the input graphs.
num_edge_dis (`int`, *optional*, defaults to 128):
Number of edge distance (edge_dis) types in the input graphs.
multi_hop_max_dist (`int`, *optional*, defaults to 5):
Maximum distance of multi-hop edges between two nodes.
spatial_pos_max (`int`, *optional*, defaults to 1024):
Maximum distance between nodes in the graph attention bias matrices, used during preprocessing and
collation.
edge_type (`str`, *optional*, defaults to `"multi_hop"`):
Type of edge relation chosen.
max_nodes (`int`, *optional*, defaults to 512):
Maximum number of nodes which can be parsed for the input graphs.
share_input_output_embed (`bool`, *optional*, defaults to `False`):
Shares the embedding layer between encoder and decoder - careful, True is not implemented.
num_layers (`int`, *optional*, defaults to 12):
Number of layers.
embedding_dim (`int`, *optional*, defaults to 768):
Dimension of the embedding layer in encoder.
ffn_embedding_dim (`int`, *optional*, defaults to 768):
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads in the encoder.
self_attention (`bool`, *optional*, defaults to `True`):
Model is self attentive (False not implemented).
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention weights.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the activation of the linear transformer layer.
layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
bias (`bool`, *optional*, defaults to `True`):
Uses bias in the attention module - unsupported at the moment.
embed_scale(`float`, *optional*, defaults to None):
Scaling factor for the node embeddings.
num_trans_layers_to_freeze (`int`, *optional*, defaults to 0):
Number of transformer layers to freeze.
encoder_normalize_before (`bool`, *optional*, defaults to `False`):
Normalize features before encoding the graph.
pre_layernorm (`bool`, *optional*, defaults to `False`):
Apply layernorm before self attention and the feed forward network. Without this, post layernorm will be
used.
apply_graphormer_init (`bool`, *optional*, defaults to `False`):
Apply a custom graphormer initialisation to the model before training.
freeze_embeddings (`bool`, *optional*, defaults to `False`):
Freeze the embedding layer, or train it along the model.
encoder_normalize_before (`bool`, *optional*, defaults to `False`):
Apply the layer norm before each encoder block.
q_noise (`float`, *optional*, defaults to 0.0):
Amount of quantization noise (see "Training with Quantization Noise for Extreme Model Compression"). (For
more detail, see fairseq's documentation on quant_noise).
qn_block_size (`int`, *optional*, defaults to 8):
Size of the blocks for subsequent quantization with iPQ (see q_noise).
kdim (`int`, *optional*, defaults to None):
Dimension of the key in the attention, if different from the other values.
vdim (`int`, *optional*, defaults to None):
Dimension of the value in the attention, if different from the other values.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
traceable (`bool`, *optional*, defaults to `False`):
Changes return value of the encoder's inner_state to stacked tensors.
Example:
```python
>>> from transformers import GraphormerForGraphClassification, GraphormerConfig
>>> # Initializing a Graphormer graphormer-base-pcqm4mv2 style configuration
>>> configuration = GraphormerConfig()
>>> # Initializing a model from the graphormer-base-pcqm4mv1 style configuration
>>> model = GraphormerForGraphClassification(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'graphormer'
keys_to_ignore_at_inference = ['past_key_values']
def __init__(self, num_classes: int=1, num_atoms: int=512 * 9, num_edges: int=512 * 3, num_in_degree: int=512, num_out_degree: int=512, num_spatial: int=512, num_edge_dis: int=128, multi_hop_max_dist: int=5, spatial_pos_max: int=1024, edge_type: str='multi_hop', max_nodes: int=512, share_input_output_embed: bool=False, num_hidden_layers: int=12, embedding_dim: int=768, ffn_embedding_dim: int=768, num_attention_heads: int=32, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, layerdrop: float=0.0, encoder_normalize_before: bool=False, pre_layernorm: bool=False, apply_graphormer_init: bool=False, activation_fn: str='gelu', embed_scale: Optional[float]=None, freeze_embeddings: bool=False, num_trans_layers_to_freeze: int=0, traceable: bool=False, q_noise: float=0.0, qn_block_size: int=8, kdim: Optional[int]=None, vdim: Optional[int]=None, bias: bool=True, self_attention: bool=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
self.num_classes = num_classes
self.num_atoms = num_atoms
self.num_in_degree = num_in_degree
self.num_out_degree = num_out_degree
self.num_edges = num_edges
self.num_spatial = num_spatial
self.num_edge_dis = num_edge_dis
self.edge_type = edge_type
self.multi_hop_max_dist = multi_hop_max_dist
self.spatial_pos_max = spatial_pos_max
self.max_nodes = max_nodes
self.num_hidden_layers = num_hidden_layers
self.embedding_dim = embedding_dim
self.hidden_size = embedding_dim
self.ffn_embedding_dim = ffn_embedding_dim
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.layerdrop = layerdrop
self.encoder_normalize_before = encoder_normalize_before
self.pre_layernorm = pre_layernorm
self.apply_graphormer_init = apply_graphormer_init
self.activation_fn = activation_fn
self.embed_scale = embed_scale
self.freeze_embeddings = freeze_embeddings
self.num_trans_layers_to_freeze = num_trans_layers_to_freeze
self.share_input_output_embed = share_input_output_embed
self.traceable = traceable
self.q_noise = q_noise
self.qn_block_size = qn_block_size
self.kdim = kdim
self.vdim = vdim
self.self_attention = self_attention
self.bias = bias
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
|
class GraphormerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`~GraphormerModel`]. It is used to instantiate a
Graphormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Graphormer
[graphormer-base-pcqm4mv1](https://huggingface.co/graphormer-base-pcqm4mv1) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_classes (`int`, *optional*, defaults to 1):
Number of target classes or labels, set to n for binary classification of n tasks.
num_atoms (`int`, *optional*, defaults to 512*9):
Number of node types in the graphs.
num_edges (`int`, *optional*, defaults to 512*3):
Number of edge types in the graph.
num_in_degree (`int`, *optional*, defaults to 512):
Number of in-degree types in the input graphs.
num_out_degree (`int`, *optional*, defaults to 512):
Number of out-degree types in the input graphs.
num_edge_dis (`int`, *optional*, defaults to 128):
Number of edge distance (edge_dis) types in the input graphs.
multi_hop_max_dist (`int`, *optional*, defaults to 5):
Maximum distance of multi-hop edges between two nodes.
spatial_pos_max (`int`, *optional*, defaults to 1024):
Maximum distance between nodes in the graph attention bias matrices, used during preprocessing and
collation.
edge_type (`str`, *optional*, defaults to `"multi_hop"`):
Type of edge relation chosen.
max_nodes (`int`, *optional*, defaults to 512):
Maximum number of nodes which can be parsed for the input graphs.
share_input_output_embed (`bool`, *optional*, defaults to `False`):
Shares the embedding layer between encoder and decoder - careful, True is not implemented.
num_layers (`int`, *optional*, defaults to 12):
Number of layers.
embedding_dim (`int`, *optional*, defaults to 768):
Dimension of the embedding layer in encoder.
ffn_embedding_dim (`int`, *optional*, defaults to 768):
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads in the encoder.
self_attention (`bool`, *optional*, defaults to `True`):
Model is self attentive (False not implemented).
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention weights.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the activation of the linear transformer layer.
layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
bias (`bool`, *optional*, defaults to `True`):
Uses bias in the attention module - unsupported at the moment.
embed_scale(`float`, *optional*, defaults to None):
Scaling factor for the node embeddings.
num_trans_layers_to_freeze (`int`, *optional*, defaults to 0):
Number of transformer layers to freeze.
encoder_normalize_before (`bool`, *optional*, defaults to `False`):
Normalize features before encoding the graph.
pre_layernorm (`bool`, *optional*, defaults to `False`):
Apply layernorm before self attention and the feed forward network. Without this, post layernorm will be
used.
apply_graphormer_init (`bool`, *optional*, defaults to `False`):
Apply a custom graphormer initialisation to the model before training.
freeze_embeddings (`bool`, *optional*, defaults to `False`):
Freeze the embedding layer, or train it along the model.
encoder_normalize_before (`bool`, *optional*, defaults to `False`):
Apply the layer norm before each encoder block.
q_noise (`float`, *optional*, defaults to 0.0):
Amount of quantization noise (see "Training with Quantization Noise for Extreme Model Compression"). (For
more detail, see fairseq's documentation on quant_noise).
qn_block_size (`int`, *optional*, defaults to 8):
Size of the blocks for subsequent quantization with iPQ (see q_noise).
kdim (`int`, *optional*, defaults to None):
Dimension of the key in the attention, if different from the other values.
vdim (`int`, *optional*, defaults to None):
Dimension of the value in the attention, if different from the other values.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
traceable (`bool`, *optional*, defaults to `False`):
Changes return value of the encoder's inner_state to stacked tensors.
Example:
```python
>>> from transformers import GraphormerForGraphClassification, GraphormerConfig
>>> # Initializing a Graphormer graphormer-base-pcqm4mv2 style configuration
>>> configuration = GraphormerConfig()
>>> # Initializing a model from the graphormer-base-pcqm4mv1 style configuration
>>> model = GraphormerForGraphClassification(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, num_classes: int=1, num_atoms: int=512 * 9, num_edges: int=512 * 3, num_in_degree: int=512, num_out_degree: int=512, num_spatial: int=512, num_edge_dis: int=128, multi_hop_max_dist: int=5, spatial_pos_max: int=1024, edge_type: str='multi_hop', max_nodes: int=512, share_input_output_embed: bool=False, num_hidden_layers: int=12, embedding_dim: int=768, ffn_embedding_dim: int=768, num_attention_heads: int=32, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, layerdrop: float=0.0, encoder_normalize_before: bool=False, pre_layernorm: bool=False, apply_graphormer_init: bool=False, activation_fn: str='gelu', embed_scale: Optional[float]=None, freeze_embeddings: bool=False, num_trans_layers_to_freeze: int=0, traceable: bool=False, q_noise: float=0.0, qn_block_size: int=8, kdim: Optional[int]=None, vdim: Optional[int]=None, bias: bool=True, self_attention: bool=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
pass
| 2
| 1
| 86
| 2
| 82
| 3
| 1
| 1.14
| 1
| 5
| 0
| 0
| 1
| 35
| 1
| 33
| 192
| 11
| 85
| 79
| 43
| 97
| 40
| 39
| 38
| 1
| 2
| 0
| 1
|
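One detail of the constructor above worth calling out is that `hidden_size` is simply an alias of `embedding_dim`, and `embedding_dim` must be divisible by `num_attention_heads` (this is enforced later in the attention module). A toy stand-in that mirrors just that aliasing, without the real `PretrainedConfig` machinery:

```python
# Hypothetical stand-in class, illustrating only the attribute aliasing of the record above.
class ToyGraphormerConfig:
    def __init__(self, embedding_dim: int = 768, num_attention_heads: int = 32, **kwargs):
        self.embedding_dim = embedding_dim
        self.hidden_size = embedding_dim          # same aliasing as in GraphormerConfig.__init__
        self.num_attention_heads = num_attention_heads

cfg = ToyGraphormerConfig(embedding_dim=128, num_attention_heads=8)
print(cfg.hidden_size, cfg.embedding_dim // cfg.num_attention_heads)  # 128 16
```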
1,752
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/graphormer/modeling_graphormer.py
|
transformers.models.deprecated.graphormer.modeling_graphormer.GraphormerDecoderHead
|
import torch
import torch.nn as nn
class GraphormerDecoderHead(nn.Module):
def __init__(self, embedding_dim: int, num_classes: int):
super().__init__()
# num_classes should be 1 for regression, or the number of classes for classification
self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))
self.classifier = nn.Linear(embedding_dim, num_classes, bias=False)
self.num_classes = num_classes
def forward(self, input_nodes: torch.Tensor, **unused) -> torch.Tensor:
input_nodes = self.classifier(input_nodes)
input_nodes = input_nodes + self.lm_output_learned_bias
return input_nodes
|
class GraphormerDecoderHead(nn.Module):
def __init__(self, embedding_dim: int, num_classes: int):
pass
def forward(self, input_nodes: torch.Tensor, **unused) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 1
| 1
| 0.1
| 1
| 3
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 10
| 6
| 7
| 1
| 10
| 6
| 7
| 1
| 1
| 0
| 2
|
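A standalone re-implementation of the small decoder head above can help check the expected tensor shapes: a bias-free linear classifier plus a single learned output bias. The class name and dimensions below are made up for illustration:

```python
import torch
import torch.nn as nn

class ToyDecoderHead(nn.Module):
    """Bias-free linear classifier plus a learned scalar output bias, as in the record above."""
    def __init__(self, embedding_dim: int, num_classes: int):
        super().__init__()
        self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))
        self.classifier = nn.Linear(embedding_dim, num_classes, bias=False)

    def forward(self, input_nodes: torch.Tensor) -> torch.Tensor:
        return self.classifier(input_nodes) + self.lm_output_learned_bias

head = ToyDecoderHead(embedding_dim=16, num_classes=3)
out = head(torch.randn(2, 5, 16))   # (batch, nodes, embedding_dim)
print(out.shape)                    # torch.Size([2, 5, 3])
```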
1,753
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/graphormer/modeling_graphormer.py
|
transformers.models.deprecated.graphormer.modeling_graphormer.GraphormerForGraphClassification
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch.nn as nn
from .configuration_graphormer import GraphormerConfig
from ....modeling_outputs import BaseModelOutputWithNoAttention, SequenceClassifierOutput
from typing import Optional, Union
import torch
class GraphormerForGraphClassification(GraphormerPreTrainedModel):
"""
This model can be used for graph-level classification or regression tasks.
It can be trained on
- regression (by setting config.num_classes to 1); there should be one float-type label per graph
- one task classification (by setting config.num_classes to the number of classes); there should be one integer
label per graph
- binary multi-task classification (by setting config.num_classes to the number of labels); there should be a list
of integer labels for each graph.
"""
def __init__(self, config: GraphormerConfig):
super().__init__(config)
self.encoder = GraphormerModel(config)
self.embedding_dim = config.embedding_dim
self.num_classes = config.num_classes
self.classifier = GraphormerDecoderHead(self.embedding_dim, self.num_classes)
self.is_encoder_decoder = True
self.post_init()
def forward(self, input_nodes: torch.LongTensor, input_edges: torch.LongTensor, attn_bias: torch.Tensor, in_degree: torch.LongTensor, out_degree: torch.LongTensor, spatial_pos: torch.LongTensor, attn_edge_type: torch.LongTensor, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, **unused) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_outputs = self.encoder(input_nodes, input_edges, attn_bias, in_degree, out_degree, spatial_pos, attn_edge_type, return_dict=True)
outputs, hidden_states = (encoder_outputs['last_hidden_state'], encoder_outputs['hidden_states'])
head_outputs = self.classifier(outputs)
logits = head_outputs[:, 0, :].contiguous()
loss = None
if labels is not None:
mask = ~torch.isnan(labels)
if self.num_classes == 1:
loss_fct = MSELoss()
loss = loss_fct(logits[mask].squeeze(), labels[mask].squeeze().float())
elif self.num_classes > 1 and len(labels.shape) == 1:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits[mask].view(-1, self.num_classes), labels[mask].view(-1))
else:
loss_fct = BCEWithLogitsLoss(reduction='sum')
loss = loss_fct(logits[mask], labels[mask])
if not return_dict:
return tuple((x for x in [loss, logits, hidden_states] if x is not None))
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=hidden_states, attentions=None)
|
class GraphormerForGraphClassification(GraphormerPreTrainedModel):
'''
This model can be used for graph-level classification or regression tasks.
It can be trained on
- regression (by setting config.num_classes to 1); there should be one float-type label per graph
- one task classification (by setting config.num_classes to the number of classes); there should be one integer
label per graph
- binary multi-task classification (by setting config.num_classes to the number of labels); there should be a list
of integer labels for each graph.
'''
def __init__(self, config: GraphormerConfig):
pass
def forward(self, input_nodes: torch.LongTensor, input_edges: torch.LongTensor, attn_bias: torch.Tensor, in_degree: torch.LongTensor, out_degree: torch.LongTensor, spatial_pos: torch.LongTensor, attn_edge_type: torch.LongTensor, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, **unused) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
pass
| 3
| 1
| 29
| 3
| 25
| 2
| 4
| 0.25
| 1
| 8
| 4
| 0
| 2
| 5
| 2
| 134
| 70
| 9
| 51
| 27
| 36
| 13
| 28
| 15
| 25
| 6
| 3
| 2
| 7
|
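The forward pass above chooses its loss from the label shape and `num_classes`: MSE for regression, cross-entropy for single-task classification, and BCE-with-logits for binary multi-task labels, with NaN labels masked out. A sketch of just that branching, run on dummy logits and labels (not the full forward pass):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def pick_loss(logits: torch.Tensor, labels: torch.Tensor, num_classes: int) -> torch.Tensor:
    mask = ~torch.isnan(labels)                            # drop graphs with missing labels
    if num_classes == 1:                                   # regression
        return MSELoss()(logits[mask].squeeze(), labels[mask].squeeze().float())
    elif num_classes > 1 and labels.dim() == 1:            # single-task classification
        return CrossEntropyLoss()(logits[mask].view(-1, num_classes), labels[mask].view(-1))
    else:                                                  # binary multi-task classification
        return BCEWithLogitsLoss(reduction="sum")(logits[mask], labels[mask].float())

# regression: one float label per graph, NaN labels are masked out
reg = pick_loss(torch.randn(4, 1), torch.tensor([0.5, float("nan"), 1.0, -0.3]), num_classes=1)
# single-task classification: one integer class id per graph
cls = pick_loss(torch.randn(4, 3), torch.tensor([0, 2, 1, 2]), num_classes=3)
print(reg, cls)
```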
1,754
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/graphormer/modeling_graphormer.py
|
transformers.models.deprecated.graphormer.modeling_graphormer.GraphormerGraphAttnBias
|
import torch
from .configuration_graphormer import GraphormerConfig
import torch.nn as nn
class GraphormerGraphAttnBias(nn.Module):
"""
Compute attention bias for each head.
"""
def __init__(self, config: GraphormerConfig):
super().__init__()
self.num_heads = config.num_attention_heads
self.multi_hop_max_dist = config.multi_hop_max_dist
self.edge_encoder = nn.Embedding(config.num_edges + 1, config.num_attention_heads, padding_idx=0)
self.edge_type = config.edge_type
if self.edge_type == 'multi_hop':
self.edge_dis_encoder = nn.Embedding(config.num_edge_dis * config.num_attention_heads * config.num_attention_heads, 1)
self.spatial_pos_encoder = nn.Embedding(config.num_spatial, config.num_attention_heads, padding_idx=0)
self.graph_token_virtual_distance = nn.Embedding(1, config.num_attention_heads)
def forward(self, input_nodes: torch.LongTensor, attn_bias: torch.Tensor, spatial_pos: torch.LongTensor, input_edges: torch.LongTensor, attn_edge_type: torch.LongTensor) -> torch.Tensor:
n_graph, n_node = input_nodes.size()[:2]
graph_attn_bias = attn_bias.clone()
graph_attn_bias = graph_attn_bias.unsqueeze(1).repeat(1, self.num_heads, 1, 1)
spatial_pos_bias = self.spatial_pos_encoder(spatial_pos).permute(0, 3, 1, 2)
graph_attn_bias[:, :, 1:, 1:] = graph_attn_bias[:, :, 1:, 1:] + spatial_pos_bias
t = self.graph_token_virtual_distance.weight.view(1, self.num_heads, 1)
graph_attn_bias[:, :, 1:, 0] = graph_attn_bias[:, :, 1:, 0] + t
graph_attn_bias[:, :, 0, :] = graph_attn_bias[:, :, 0, :] + t
if self.edge_type == 'multi_hop':
spatial_pos_ = spatial_pos.clone()
spatial_pos_[spatial_pos_ == 0] = 1
spatial_pos_ = torch.where(spatial_pos_ > 1, spatial_pos_ - 1, spatial_pos_)
if self.multi_hop_max_dist > 0:
spatial_pos_ = spatial_pos_.clamp(0, self.multi_hop_max_dist)
input_edges = input_edges[:, :, :, :self.multi_hop_max_dist, :]
input_edges = self.edge_encoder(input_edges).mean(-2)
max_dist = input_edges.size(-2)
edge_input_flat = input_edges.permute(3, 0, 1, 2, 4).reshape(max_dist, -1, self.num_heads)
edge_input_flat = torch.bmm(edge_input_flat, self.edge_dis_encoder.weight.reshape(-1, self.num_heads, self.num_heads)[:max_dist, :, :])
input_edges = edge_input_flat.reshape(max_dist, n_graph, n_node, n_node, self.num_heads).permute(1, 2, 3, 0, 4)
input_edges = (input_edges.sum(-2) / spatial_pos_.float().unsqueeze(-1)).permute(0, 3, 1, 2)
else:
input_edges = self.edge_encoder(attn_edge_type).mean(-2).permute(0, 3, 1, 2)
graph_attn_bias[:, :, 1:, 1:] = graph_attn_bias[:, :, 1:, 1:] + input_edges
graph_attn_bias = graph_attn_bias + attn_bias.unsqueeze(1)
return graph_attn_bias
|
class GraphormerGraphAttnBias(nn.Module):
'''
Compute attention bias for each head.
'''
def __init__(self, config: GraphormerConfig):
pass
def forward(self, input_nodes: torch.LongTensor, attn_bias: torch.Tensor, spatial_pos: torch.LongTensor, input_edges: torch.LongTensor, attn_edge_type: torch.LongTensor) -> torch.Tensor:
pass
| 3
| 1
| 37
| 6
| 27
| 6
| 3
| 0.27
| 1
| 3
| 1
| 0
| 2
| 7
| 2
| 12
| 80
| 13
| 55
| 24
| 45
| 15
| 37
| 17
| 34
| 3
| 1
| 2
| 5
|
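The spatial part of `GraphormerGraphAttnBias.forward` looks up each pairwise shortest-path distance in an embedding of width `num_heads` and adds it to the per-head attention bias, reserving index 0 of the bias matrix for the virtual graph token. A toy sketch of that step with small, made-up sizes:

```python
import torch
import torch.nn as nn

num_heads, num_spatial, n_graph, n_node = 4, 8, 2, 3
spatial_pos_encoder = nn.Embedding(num_spatial, num_heads, padding_idx=0)

spatial_pos = torch.randint(0, num_spatial, (n_graph, n_node, n_node))   # pairwise distances
attn_bias = torch.zeros(n_graph, num_heads, n_node + 1, n_node + 1)      # +1 for the graph token

# (n_graph, n_node, n_node, num_heads) -> (n_graph, num_heads, n_node, n_node), as in the record
spatial_pos_bias = spatial_pos_encoder(spatial_pos).permute(0, 3, 1, 2)
attn_bias[:, :, 1:, 1:] += spatial_pos_bias
print(attn_bias.shape)  # torch.Size([2, 4, 4, 4])
```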
1,755
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/graphormer/modeling_graphormer.py
|
transformers.models.deprecated.graphormer.modeling_graphormer.GraphormerGraphEncoder
|
from .configuration_graphormer import GraphormerConfig
import torch.nn as nn
import torch
from typing import Optional, Union
class GraphormerGraphEncoder(nn.Module):
def __init__(self, config: GraphormerConfig):
super().__init__()
self.dropout_module = torch.nn.Dropout(p=config.dropout, inplace=False)
self.layerdrop = config.layerdrop
self.embedding_dim = config.embedding_dim
self.apply_graphormer_init = config.apply_graphormer_init
self.traceable = config.traceable
self.graph_node_feature = GraphormerGraphNodeFeature(config)
self.graph_attn_bias = GraphormerGraphAttnBias(config)
self.embed_scale = config.embed_scale
if config.q_noise > 0:
self.quant_noise = quant_noise(nn.Linear(self.embedding_dim, self.embedding_dim, bias=False), config.q_noise, config.qn_block_size)
else:
self.quant_noise = None
if config.encoder_normalize_before:
self.emb_layer_norm = nn.LayerNorm(self.embedding_dim)
else:
self.emb_layer_norm = None
if config.pre_layernorm:
self.final_layer_norm = nn.LayerNorm(self.embedding_dim)
if self.layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend([GraphormerGraphEncoderLayer(config) for _ in range(config.num_hidden_layers)])
if config.freeze_embeddings:
raise NotImplementedError('Freezing embeddings is not implemented yet.')
for layer in range(config.num_trans_layers_to_freeze):
m = self.layers[layer]
if m is not None:
for p in m.parameters():
p.requires_grad = False
def forward(self, input_nodes: torch.LongTensor, input_edges: torch.LongTensor, attn_bias: torch.Tensor, in_degree: torch.LongTensor, out_degree: torch.LongTensor, spatial_pos: torch.LongTensor, attn_edge_type: torch.LongTensor, perturb=None, last_state_only: bool=False, token_embeddings: Optional[torch.Tensor]=None, attn_mask: Optional[torch.Tensor]=None) -> tuple[Union[torch.Tensor, list[torch.LongTensor]], torch.Tensor]:
data_x = input_nodes
n_graph, n_node = data_x.size()[:2]
padding_mask = data_x[:, :, 0].eq(0)
padding_mask_cls = torch.zeros(n_graph, 1, device=padding_mask.device, dtype=padding_mask.dtype)
padding_mask = torch.cat((padding_mask_cls, padding_mask), dim=1)
attn_bias = self.graph_attn_bias(input_nodes, attn_bias, spatial_pos, input_edges, attn_edge_type)
if token_embeddings is not None:
input_nodes = token_embeddings
else:
input_nodes = self.graph_node_feature(input_nodes, in_degree, out_degree)
if perturb is not None:
input_nodes[:, 1:, :] += perturb
if self.embed_scale is not None:
input_nodes = input_nodes * self.embed_scale
if self.quant_noise is not None:
input_nodes = self.quant_noise(input_nodes)
if self.emb_layer_norm is not None:
input_nodes = self.emb_layer_norm(input_nodes)
input_nodes = self.dropout_module(input_nodes)
input_nodes = input_nodes.transpose(0, 1)
inner_states = []
if not last_state_only:
inner_states.append(input_nodes)
for layer in self.layers:
input_nodes, _ = layer(input_nodes, self_attn_padding_mask=padding_mask, self_attn_mask=attn_mask, self_attn_bias=attn_bias)
if not last_state_only:
inner_states.append(input_nodes)
graph_rep = input_nodes[0, :, :]
if last_state_only:
inner_states = [input_nodes]
if self.traceable:
return (torch.stack(inner_states), graph_rep)
else:
return (inner_states, graph_rep)
|
class GraphormerGraphEncoder(nn.Module):
def __init__(self, config: GraphormerConfig):
pass
def forward(self, input_nodes: torch.LongTensor, input_edges: torch.LongTensor, attn_bias: torch.Tensor, in_degree: torch.LongTensor, out_degree: torch.LongTensor, spatial_pos: torch.LongTensor, attn_edge_type: torch.LongTensor, perturb=None, last_state_only: bool=False, token_embeddings: Optional[torch.Tensor]=None, attn_mask: Optional[torch.Tensor]=None) -> tuple[Union[torch.Tensor, list[torch.LongTensor]], torch.Tensor]:
pass
| 3
| 0
| 57
| 11
| 45
| 1
| 10
| 0.02
| 1
| 10
| 5
| 0
| 2
| 12
| 2
| 12
| 115
| 23
| 90
| 40
| 74
| 2
| 63
| 26
| 60
| 11
| 1
| 3
| 20
|
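At the top of `GraphormerGraphEncoder.forward`, a node is treated as padding when its first feature id equals 0, and one extra non-padding slot is prepended for the virtual graph token. A small sketch of that mask construction on toy tensors:

```python
import torch

input_nodes = torch.tensor([
    [[5, 1], [7, 2], [0, 0]],     # graph 1: 2 real nodes + 1 padded node
    [[3, 9], [0, 0], [0, 0]],     # graph 2: 1 real node + 2 padded nodes
])
n_graph, n_node = input_nodes.size()[:2]

padding_mask = input_nodes[:, :, 0].eq(0)                              # (n_graph, n_node)
padding_mask_cls = torch.zeros(n_graph, 1, dtype=padding_mask.dtype)   # graph-token slot
padding_mask = torch.cat((padding_mask_cls, padding_mask), dim=1)
print(padding_mask)
# tensor([[False, False, False,  True],
#         [False, False,  True,  True]])
```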
1,756
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/graphormer/modeling_graphormer.py
|
transformers.models.deprecated.graphormer.modeling_graphormer.GraphormerGraphEncoderLayer
|
from typing import Optional, Union
from .configuration_graphormer import GraphormerConfig
from ....activations import ACT2FN
import torch.nn as nn
import torch
class GraphormerGraphEncoderLayer(nn.Module):
def __init__(self, config: GraphormerConfig) -> None:
super().__init__()
self.embedding_dim = config.embedding_dim
self.num_attention_heads = config.num_attention_heads
self.q_noise = config.q_noise
self.qn_block_size = config.qn_block_size
self.pre_layernorm = config.pre_layernorm
self.dropout_module = torch.nn.Dropout(p=config.dropout, inplace=False)
self.activation_dropout_module = torch.nn.Dropout(p=config.activation_dropout, inplace=False)
self.activation_fn = ACT2FN[config.activation_fn]
self.self_attn = GraphormerMultiheadAttention(config)
self.self_attn_layer_norm = nn.LayerNorm(self.embedding_dim)
self.fc1 = self.build_fc(self.embedding_dim, config.ffn_embedding_dim, q_noise=config.q_noise, qn_block_size=config.qn_block_size)
self.fc2 = self.build_fc(config.ffn_embedding_dim, self.embedding_dim, q_noise=config.q_noise, qn_block_size=config.qn_block_size)
self.final_layer_norm = nn.LayerNorm(self.embedding_dim)
def build_fc(self, input_dim: int, output_dim: int, q_noise: float, qn_block_size: int) -> Union[nn.Module, nn.Linear, nn.Embedding, nn.Conv2d]:
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def forward(self, input_nodes: torch.Tensor, self_attn_bias: Optional[torch.Tensor]=None, self_attn_mask: Optional[torch.Tensor]=None, self_attn_padding_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
nn.LayerNorm is applied either before or after the self-attention/ffn modules similar to the original
Transformer implementation.
"""
residual = input_nodes
if self.pre_layernorm:
input_nodes = self.self_attn_layer_norm(input_nodes)
input_nodes, attn = self.self_attn(query=input_nodes, key=input_nodes, value=input_nodes, attn_bias=self_attn_bias, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask)
input_nodes = self.dropout_module(input_nodes)
input_nodes = residual + input_nodes
if not self.pre_layernorm:
input_nodes = self.self_attn_layer_norm(input_nodes)
residual = input_nodes
if self.pre_layernorm:
input_nodes = self.final_layer_norm(input_nodes)
input_nodes = self.activation_fn(self.fc1(input_nodes))
input_nodes = self.activation_dropout_module(input_nodes)
input_nodes = self.fc2(input_nodes)
input_nodes = self.dropout_module(input_nodes)
input_nodes = residual + input_nodes
if not self.pre_layernorm:
input_nodes = self.final_layer_norm(input_nodes)
return (input_nodes, attn)
|
class GraphormerGraphEncoderLayer(nn.Module):
def __init__(self, config: GraphormerConfig) -> None:
pass
def build_fc(self, input_dim: int, output_dim: int, q_noise: float, qn_block_size: int) -> Union[nn.Module, nn.Linear, nn.Embedding, nn.Conv2d]:
pass
def forward(self, input_nodes: torch.Tensor, self_attn_bias: Optional[torch.Tensor]=None, self_attn_mask: Optional[torch.Tensor]=None, self_attn_padding_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''
nn.LayerNorm is applied either before or after the self-attention/ffn modules similar to the original
Transformer implementation.
'''
pass
| 4
| 1
| 27
| 3
| 21
| 3
| 2
| 0.13
| 1
| 6
| 2
| 0
| 3
| 13
| 3
| 13
| 84
| 12
| 64
| 27
| 52
| 8
| 38
| 19
| 34
| 5
| 1
| 1
| 7
|
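The encoder layer above supports both pre-layernorm and post-layernorm residual blocks, controlled by `config.pre_layernorm`. The sketch below reduces that branching to a single generic residual sub-layer; a plain `nn.Linear` stands in for the attention/FFN sub-modules:

```python
import torch
import torch.nn as nn

def residual_block(x, sublayer, norm, pre_layernorm: bool):
    residual = x
    if pre_layernorm:
        x = norm(x)          # normalise before the sub-layer ("pre-LN")
    x = sublayer(x)
    x = residual + x
    if not pre_layernorm:
        x = norm(x)          # normalise after the residual sum ("post-LN")
    return x

dim = 16
x = torch.randn(4, 2, dim)   # (seq, batch, dim) layout used by the encoder above
out = residual_block(x, nn.Linear(dim, dim), nn.LayerNorm(dim), pre_layernorm=True)
print(out.shape)  # torch.Size([4, 2, 16])
```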
1,757
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/graphormer/modeling_graphormer.py
|
transformers.models.deprecated.graphormer.modeling_graphormer.GraphormerGraphNodeFeature
|
from .configuration_graphormer import GraphormerConfig
import torch
import torch.nn as nn
class GraphormerGraphNodeFeature(nn.Module):
"""
Compute node features for each node in the graph.
"""
def __init__(self, config: GraphormerConfig):
super().__init__()
self.num_heads = config.num_attention_heads
self.num_atoms = config.num_atoms
self.atom_encoder = nn.Embedding(config.num_atoms + 1, config.hidden_size, padding_idx=config.pad_token_id)
self.in_degree_encoder = nn.Embedding(config.num_in_degree, config.hidden_size, padding_idx=config.pad_token_id)
self.out_degree_encoder = nn.Embedding(config.num_out_degree, config.hidden_size, padding_idx=config.pad_token_id)
self.graph_token = nn.Embedding(1, config.hidden_size)
def forward(self, input_nodes: torch.LongTensor, in_degree: torch.LongTensor, out_degree: torch.LongTensor) -> torch.Tensor:
n_graph, n_node = input_nodes.size()[:2]
node_feature = self.atom_encoder(input_nodes).sum(dim=-2) + self.in_degree_encoder(in_degree) + self.out_degree_encoder(out_degree)
graph_token_feature = self.graph_token.weight.unsqueeze(0).repeat(n_graph, 1, 1)
graph_node_feature = torch.cat([graph_token_feature, node_feature], dim=1)
return graph_node_feature
|
class GraphormerGraphNodeFeature(nn.Module):
'''
Compute node features for each node in the graph.
'''
def __init__(self, config: GraphormerConfig):
pass
def forward(self, input_nodes: torch.LongTensor, in_degree: torch.LongTensor, out_degree: torch.LongTensor) -> torch.Tensor:
pass
| 3
| 1
| 17
| 3
| 14
| 1
| 1
| 0.18
| 1
| 3
| 1
| 0
| 2
| 6
| 2
| 12
| 39
| 8
| 28
| 18
| 20
| 5
| 15
| 13
| 12
| 1
| 1
| 0
| 2
|
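`GraphormerGraphNodeFeature.forward` builds each node representation as the sum of its atom-feature embeddings and its in-/out-degree embeddings, then prepends a learned graph-token embedding. A toy reproduction with made-up vocabulary sizes:

```python
import torch
import torch.nn as nn

hidden, num_atoms, num_degree, n_graph, n_node, feat = 8, 32, 16, 2, 3, 4
atom_encoder = nn.Embedding(num_atoms + 1, hidden, padding_idx=0)
in_degree_encoder = nn.Embedding(num_degree, hidden, padding_idx=0)
out_degree_encoder = nn.Embedding(num_degree, hidden, padding_idx=0)
graph_token = nn.Embedding(1, hidden)

input_nodes = torch.randint(1, num_atoms, (n_graph, n_node, feat))
in_degree = torch.randint(0, num_degree, (n_graph, n_node))
out_degree = torch.randint(0, num_degree, (n_graph, n_node))

# Sum the per-feature atom embeddings, then add the degree embeddings.
node_feature = atom_encoder(input_nodes).sum(dim=-2) + in_degree_encoder(in_degree) + out_degree_encoder(out_degree)
graph_token_feature = graph_token.weight.unsqueeze(0).repeat(n_graph, 1, 1)
print(torch.cat([graph_token_feature, node_feature], dim=1).shape)  # torch.Size([2, 4, 8])
```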
1,758
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/graphormer/modeling_graphormer.py
|
transformers.models.deprecated.graphormer.modeling_graphormer.GraphormerModel
|
import torch
from .configuration_graphormer import GraphormerConfig
from ....activations import ACT2FN
from ....modeling_outputs import BaseModelOutputWithNoAttention, SequenceClassifierOutput
from typing import Optional, Union
import torch.nn as nn
class GraphormerModel(GraphormerPreTrainedModel):
"""The Graphormer model is a graph-encoder model.
It goes from a graph to its representation. If you want to use the model for a downstream classification task, use
GraphormerForGraphClassification instead. For any other downstream task, feel free to add a new class, or combine
this model with a downstream model of your choice, following the example in GraphormerForGraphClassification.
"""
def __init__(self, config: GraphormerConfig):
super().__init__(config)
self.max_nodes = config.max_nodes
self.graph_encoder = GraphormerGraphEncoder(config)
self.share_input_output_embed = config.share_input_output_embed
self.lm_output_learned_bias = None
self.load_softmax = not getattr(config, 'remove_head', False)
self.lm_head_transform_weight = nn.Linear(config.embedding_dim, config.embedding_dim)
self.activation_fn = ACT2FN[config.activation_fn]
self.layer_norm = nn.LayerNorm(config.embedding_dim)
self.post_init()
def reset_output_layer_parameters(self):
self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))
def forward(self, input_nodes: torch.LongTensor, input_edges: torch.LongTensor, attn_bias: torch.Tensor, in_degree: torch.LongTensor, out_degree: torch.LongTensor, spatial_pos: torch.LongTensor, attn_edge_type: torch.LongTensor, perturb: Optional[torch.FloatTensor]=None, masked_tokens: None=None, return_dict: Optional[bool]=None, **unused) -> Union[tuple[torch.LongTensor], BaseModelOutputWithNoAttention]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
inner_states, graph_rep = self.graph_encoder(input_nodes, input_edges, attn_bias, in_degree, out_degree, spatial_pos, attn_edge_type, perturb=perturb)
input_nodes = inner_states[-1].transpose(0, 1)
if masked_tokens is not None:
raise NotImplementedError
input_nodes = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(input_nodes)))
if self.share_input_output_embed and hasattr(self.graph_encoder.embed_tokens, 'weight'):
input_nodes = torch.nn.functional.linear(input_nodes, self.graph_encoder.embed_tokens.weight)
if not return_dict:
return tuple((x for x in [input_nodes, inner_states] if x is not None))
return BaseModelOutputWithNoAttention(last_hidden_state=input_nodes, hidden_states=inner_states)
def max_nodes(self):
"""Maximum output length supported by the encoder."""
return self.max_nodes
|
class GraphormerModel(GraphormerPreTrainedModel):
'''The Graphormer model is a graph-encoder model.
It goes from a graph to its representation. If you want to use the model for a downstream classification task, use
GraphormerForGraphClassification instead. For any other downstream task, feel free to add a new class, or combine
this model with a downstream model of your choice, following the example in GraphormerForGraphClassification.
'''
def __init__(self, config: GraphormerConfig):
pass
def reset_output_layer_parameters(self):
pass
def forward(self, input_nodes: torch.LongTensor, input_edges: torch.LongTensor, attn_bias: torch.Tensor, in_degree: torch.LongTensor, out_degree: torch.LongTensor, spatial_pos: torch.LongTensor, attn_edge_type: torch.LongTensor, perturb: Optional[torch.FloatTensor]=None, masked_tokens: None=None, return_dict: Optional[bool]=None, **unused) -> Union[tuple[torch.LongTensor], BaseModelOutputWithNoAttention]:
pass
def max_nodes(self):
'''Maximum output length supported by the encoder.'''
pass
| 5
| 2
| 15
| 3
| 11
| 1
| 2
| 0.23
| 1
| 8
| 3
| 0
| 4
| 7
| 4
| 136
| 69
| 16
| 43
| 26
| 25
| 10
| 28
| 13
| 23
| 5
| 3
| 1
| 8
|
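After the graph encoder, `GraphormerModel.forward` applies a linear transform, the configured activation, and a LayerNorm to the last hidden state. A sketch of just that output transform, with toy dimensions; `nn.GELU` stands in for `ACT2FN[config.activation_fn]` with the default `"gelu"`:

```python
import torch
import torch.nn as nn

embedding_dim = 16
lm_head_transform_weight = nn.Linear(embedding_dim, embedding_dim)
activation_fn = nn.GELU()
layer_norm = nn.LayerNorm(embedding_dim)

last_hidden = torch.randn(2, 5, embedding_dim)   # (batch, 1 + n_node, embedding_dim)
out = layer_norm(activation_fn(lm_head_transform_weight(last_hidden)))
print(out.shape)  # torch.Size([2, 5, 16])
```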
1,759
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/graphormer/modeling_graphormer.py
|
transformers.models.deprecated.graphormer.modeling_graphormer.GraphormerMultiheadAttention
|
from typing import Optional, Union
import torch.nn as nn
import torch
import math
from .configuration_graphormer import GraphormerConfig
class GraphormerMultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, config: GraphormerConfig):
super().__init__()
self.embedding_dim = config.embedding_dim
self.kdim = config.kdim if config.kdim is not None else config.embedding_dim
self.vdim = config.vdim if config.vdim is not None else config.embedding_dim
self.qkv_same_dim = self.kdim == config.embedding_dim and self.vdim == config.embedding_dim
self.num_heads = config.num_attention_heads
self.attention_dropout_module = torch.nn.Dropout(p=config.attention_dropout, inplace=False)
self.head_dim = config.embedding_dim // config.num_attention_heads
if not self.head_dim * config.num_attention_heads == self.embedding_dim:
raise AssertionError('The embedding_dim must be divisible by num_heads.')
self.scaling = self.head_dim ** (-0.5)
self.self_attention = True
if not self.self_attention:
raise NotImplementedError('The Graphormer model only supports self attention for now.')
if self.self_attention and (not self.qkv_same_dim):
raise AssertionError('Self-attention requires query, key and value to be of the same size.')
self.k_proj = quant_noise(nn.Linear(self.kdim, config.embedding_dim, bias=config.bias), config.q_noise, config.qn_block_size)
self.v_proj = quant_noise(nn.Linear(self.vdim, config.embedding_dim, bias=config.bias), config.q_noise, config.qn_block_size)
self.q_proj = quant_noise(nn.Linear(config.embedding_dim, config.embedding_dim, bias=config.bias), config.q_noise, config.qn_block_size)
self.out_proj = quant_noise(nn.Linear(config.embedding_dim, config.embedding_dim, bias=config.bias), config.q_noise, config.qn_block_size)
self.onnx_trace = False
def reset_parameters(self):
if self.qkv_same_dim:
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
def forward(self, query: torch.LongTensor, key: Optional[torch.Tensor], value: Optional[torch.Tensor], attn_bias: Optional[torch.Tensor], key_padding_mask: Optional[torch.Tensor]=None, need_weights: bool=True, attn_mask: Optional[torch.Tensor]=None, before_softmax: bool=False, need_head_weights: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
Args:
key_padding_mask (Bytetorch.Tensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (Bytetorch.Tensor, optional): typically used to
implement causal attention, where the mask prevents the attention from looking forward in time
(default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default: return the average attention weights over all
heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embedding_dim = query.size()
src_len = tgt_len
if not embedding_dim == self.embedding_dim:
raise AssertionError(f'The query embedding dimension {embedding_dim} is not equal to the expected embedding_dim {self.embedding_dim}.')
if not list(query.size()) == [tgt_len, bsz, embedding_dim]:
raise AssertionError('Query size incorrect in Graphormer, compared to model dimensions.')
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
if key_bsz != bsz or value is None or (not (src_len, bsz == value.shape[:2])):
raise AssertionError('The batch shape does not match the key or value shapes provided to the attention.')
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
q *= self.scaling
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if k is None or not k.size(1) == src_len:
raise AssertionError('The shape of the key generated in the attention is incorrect')
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
if key_padding_mask.size(0) != bsz or key_padding_mask.size(1) != src_len:
raise AssertionError('The shape of the generated padding mask for the key does not match expected dimensions.')
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
if list(attn_weights.size()) != [bsz * self.num_heads, tgt_len, src_len]:
raise AssertionError('The attention weights generated do not match the expected dimensions.')
if attn_bias is not None:
attn_weights += attn_bias.view(bsz * self.num_heads, tgt_len, src_len)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float('-inf'))
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return (attn_weights, v)
attn_weights_float = torch.nn.functional.softmax(attn_weights, dim=-1)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.attention_dropout_module(attn_weights)
if v is None:
raise AssertionError('No value generated')
attn = torch.bmm(attn_probs, v)
if list(attn.size()) != [bsz * self.num_heads, tgt_len, self.head_dim]:
raise AssertionError('The attention generated do not match the expected dimensions.')
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embedding_dim)
attn: torch.Tensor = self.out_proj(attn)
attn_weights = None
if need_weights:
attn_weights = attn_weights_float.contiguous().view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
if not need_head_weights:
attn_weights = attn_weights.mean(dim=0)
return (attn, attn_weights)
def apply_sparse_mask(self, attn_weights: torch.Tensor, tgt_len: int, src_len: int, bsz: int) -> torch.Tensor:
return attn_weights
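# Hedged illustration (not part of the original class): the head split/merge reshape pattern used
# in the forward pass above, shown on a standalone tensor. All sizes here are arbitrary, and torch
# is assumed to be imported at module level as it is in the original file.
def _demo_head_reshape():
    tgt_len, bsz, num_heads, head_dim = 5, 2, 4, 8
    embedding_dim = num_heads * head_dim
    query = torch.randn(tgt_len, bsz, embedding_dim)
    # Split heads: fold the head dimension into the batch dimension so torch.bmm can run all
    # heads in parallel, giving shape (bsz * num_heads, tgt_len, head_dim).
    q = query.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    assert q.shape == (bsz * num_heads, tgt_len, head_dim)
    # Merge heads: invert the reshape after the attention-weighted sum to recover the original
    # (tgt_len, bsz, embedding_dim) layout expected by out_proj.
    attn = q.transpose(0, 1).contiguous().view(tgt_len, bsz, embedding_dim)
    assert attn.shape == query.shape
    return attn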
|
class GraphormerMultiheadAttention(nn.Module):
'''Multi-headed attention.
See "Attention Is All You Need" for more details.
'''
def __init__(self, config: GraphormerConfig):
pass
def reset_parameters(self):
pass
def forward(self, query: torch.LongTensor, key: Optional[torch.Tensor], value: Optional[torch.Tensor], attn_bias: Optional[torch.Tensor], key_padding_mask: Optional[torch.Tensor]=None, need_weights: bool=True, attn_mask: Optional[torch.Tensor]=None, before_softmax: bool=False, need_head_weights: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''
Args:
            key_padding_mask (torch.ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
            attn_mask (torch.ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the attention from looking forward in time
(default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default: return the average attention weights over all
heads.
'''
pass
def apply_sparse_mask(self, attn_weights: torch.Tensor, tgt_len: int, src_len: int, bsz: int) -> torch.Tensor:
pass
| 5
| 2
| 45
| 6
| 33
| 6
| 8
| 0.19
| 1
| 9
| 1
| 0
| 4
| 14
| 4
| 14
| 188
| 30
| 134
| 40
| 118
| 25
| 97
| 29
| 92
| 22
| 1
| 3
| 32
|
1,760
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/graphormer/modeling_graphormer.py
|
transformers.models.deprecated.graphormer.modeling_graphormer.GraphormerPreTrainedModel
|
import torch.nn as nn
import torch
from ....modeling_utils import PreTrainedModel
from typing import Optional, Union
from .configuration_graphormer import GraphormerConfig
class GraphormerPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: GraphormerConfig
base_model_prefix = 'graphormer'
main_input_name_nodes = 'input_nodes'
main_input_name_edges = 'input_edges'
def normal_(self, data: torch.Tensor):
data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
def init_graphormer_params(self, module: Union[nn.Linear, nn.Embedding, GraphormerMultiheadAttention]):
"""
Initialize the weights specific to the Graphormer Model.
"""
if isinstance(module, nn.Linear):
self.normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
self.normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, GraphormerMultiheadAttention):
self.normal_(module.q_proj.weight.data)
self.normal_(module.k_proj.weight.data)
self.normal_(module.v_proj.weight.data)
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.Embedding, nn.LayerNorm, GraphormerMultiheadAttention, GraphormerGraphEncoder]):
"""
Initialize the weights
"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, GraphormerMultiheadAttention):
module.q_proj.weight.data.normal_(mean=0.0, std=0.02)
module.k_proj.weight.data.normal_(mean=0.0, std=0.02)
module.v_proj.weight.data.normal_(mean=0.0, std=0.02)
module.reset_parameters()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, GraphormerGraphEncoder):
if module.apply_graphormer_init:
module.apply(self.init_graphormer_params)
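# Hedged illustration (not part of the original class): nn.Module.apply walks every submodule,
# which is how init_graphormer_params above gets applied across a whole encoder. The tiny
# container below is a placeholder, not an actual Graphormer component.
def _demo_apply_init():
    block = nn.Sequential(nn.Linear(8, 8), nn.Embedding(10, 8))

    def _init(module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    block.apply(_init)
    return block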
|
class GraphormerPreTrainedModel(PreTrainedModel):
'''
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
'''
def normal_(self, data: torch.Tensor):
pass
def init_graphormer_params(self, module: Union[nn.Linear, nn.Embedding, GraphormerMultiheadAttention]):
'''
Initialize the weights specific to the Graphormer Model.
'''
pass
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.Embedding, nn.LayerNorm, GraphormerMultiheadAttention, GraphormerGraphEncoder]):
'''
Initialize the weights
'''
pass
| 4
| 3
| 18
| 0
| 14
| 3
| 6
| 0.27
| 1
| 3
| 2
| 2
| 3
| 0
| 3
| 132
| 66
| 5
| 48
| 13
| 39
| 13
| 38
| 8
| 34
| 10
| 2
| 2
| 17
|
1,761
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/graphormer/modeling_graphormer.py
|
transformers.models.deprecated.graphormer.modeling_graphormer.LayerDropModuleList
|
import torch
from collections.abc import Iterable, Iterator
import torch.nn as nn
from typing import Optional, Union
class LayerDropModuleList(nn.ModuleList):
"""
From:
https://github.com/facebookresearch/fairseq/blob/dd0079bde7f678b0cd0715cbd0ae68d661b7226d/fairseq/modules/layer_drop.py
A LayerDrop implementation based on [`torch.nn.ModuleList`]. LayerDrop as described in
https://huggingface.co/papers/1909.11556.
We refresh the choice of which layers to drop every time we iterate over the LayerDropModuleList instance. During
evaluation we always iterate over all layers.
Usage:
```python
layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3])
for layer in layers: # this might iterate over layers 1 and 3
x = layer(x)
for layer in layers: # this might iterate over all layers
x = layer(x)
for layer in layers: # this might not iterate over any layers
x = layer(x)
```
Args:
p (float): probability of dropping out each layer
modules (iterable, optional): an iterable of modules to add
"""
def __init__(self, p: float, modules: Optional[Iterable[nn.Module]]=None):
super().__init__(modules)
self.p = p
def __iter__(self) -> Iterator[nn.Module]:
dropout_probs = torch.empty(len(self)).uniform_()
for i, m in enumerate(super().__iter__()):
if not self.training or dropout_probs[i] > self.p:
yield m
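# Hedged usage sketch (not part of the original class): in training mode each layer survives
# independently with probability 1 - p, while eval mode always yields every layer. The linear
# layers below are arbitrary placeholders.
def _demo_layer_drop():
    layers = LayerDropModuleList(p=0.5, modules=[nn.Linear(4, 4) for _ in range(6)])
    layers.train()
    kept_while_training = len(list(layers))  # varies from run to run, about 3 on average
    layers.eval()
    kept_while_evaluating = len(list(layers))  # always 6, nothing is dropped at evaluation time
    return kept_while_training, kept_while_evaluating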
|
class LayerDropModuleList(nn.ModuleList):
'''
From:
https://github.com/facebookresearch/fairseq/blob/dd0079bde7f678b0cd0715cbd0ae68d661b7226d/fairseq/modules/layer_drop.py
A LayerDrop implementation based on [`torch.nn.ModuleList`]. LayerDrop as described in
https://huggingface.co/papers/1909.11556.
We refresh the choice of which layers to drop every time we iterate over the LayerDropModuleList instance. During
evaluation we always iterate over all layers.
Usage:
```python
layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3])
for layer in layers: # this might iterate over layers 1 and 3
x = layer(x)
for layer in layers: # this might iterate over all layers
x = layer(x)
for layer in layers: # this might not iterate over any layers
x = layer(x)
```
Args:
p (float): probability of dropping out each layer
modules (iterable, optional): an iterable of modules to add
'''
def __init__(self, p: float, modules: Optional[Iterable[nn.Module]]=None):
pass
def __iter__(self) -> Iterator[nn.Module]:
pass
| 3
| 1
| 4
| 0
| 4
| 0
| 2
| 2.33
| 1
| 3
| 0
| 0
| 2
| 1
| 2
| 2
| 36
| 6
| 9
| 6
| 6
| 21
| 9
| 6
| 6
| 3
| 1
| 2
| 4
|
1,762
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/configuration_jukebox.py
|
transformers.models.deprecated.jukebox.configuration_jukebox.JukeboxConfig
|
from ....configuration_utils import PretrainedConfig
class JukeboxConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`JukeboxModel`].
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will
yield a similar configuration to that of
[openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.
    The downsampling and stride are used to determine the downsampling of the input sequence. For example, downsampling =
    (5, 3) and strides = (2, 2) will downsample the audio by 2**5 = 32 to get the first level of codes, and by 2**(5 + 3) =
    256 to get the second level of codes. This is mostly true for training the top level prior and the upsamplers.
Args:
vqvae_config (`JukeboxVQVAEConfig`, *optional*):
Configuration for the `JukeboxVQVAE` model.
prior_config_list (`List[JukeboxPriorConfig]`, *optional*):
List of the configs for each of the `JukeboxPrior` of the model. The original architecture uses 3 priors.
nb_priors (`int`, *optional*, defaults to 3):
            Number of prior models that will sequentially sample tokens. Each prior is a conditional autoregressive
            (decoder) model, apart from the top prior, which can include a lyric encoder. The available models were
            trained using a top prior and 2 upsampler priors.
sampling_rate (`int`, *optional*, defaults to 44100):
Sampling rate of the raw audio.
timing_dims (`int`, *optional*, defaults to 64):
Dimensions of the JukeboxRangeEmbedding layer which is equivalent to traditional positional embedding
layer. The timing embedding layer converts the absolute and relative position in the currently sampled
audio to a tensor of length `timing_dims` that will be added to the music tokens.
        min_duration (`int`, *optional*, defaults to 0):
            Minimum duration of the audio to generate.
        max_duration (`float`, *optional*, defaults to 600.0):
            Maximum duration of the audio to generate.
max_nb_genres (`int`, *optional*, defaults to 5):
Maximum number of genres that can be used to condition a single sample.
metadata_conditioning (`bool`, *optional*, defaults to `True`):
Whether or not to use metadata conditioning, corresponding to the artist, the genre and the min/maximum
duration.
Example:
```python
>>> from transformers import JukeboxModel, JukeboxConfig
>>> # Initializing a Jukebox configuration
>>> configuration = JukeboxConfig()
>>> # Initializing a model from the configuration
>>> model = JukeboxModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'jukebox'
def __init__(self, vqvae_config=None, prior_config_list=None, nb_priors=3, sampling_rate=44100, timing_dims=64, min_duration=0, max_duration=600.0, max_nb_genres=5, metadata_conditioning=True, **kwargs):
if vqvae_config is None:
vqvae_config = {}
logger.info('vqvae_config is None. initializing the JukeboxVQVAE with default values.')
self.vqvae_config = JukeboxVQVAEConfig(**vqvae_config)
if prior_config_list is not None:
self.prior_configs = [JukeboxPriorConfig(**prior_config) for prior_config in prior_config_list]
else:
self.prior_configs = []
for prior_idx in range(nb_priors):
prior_config = kwargs.pop(f'prior_{prior_idx}', None)
if prior_config is None:
prior_config = {}
logger.info(f"prior_{prior_idx}'s config is None. Initializing the JukeboxPriorConfig list with default values.")
self.prior_configs.append(JukeboxPriorConfig(**prior_config))
self.hop_fraction = self.vqvae_config.hop_fraction
self.nb_priors = nb_priors
self.max_nb_genres = max_nb_genres
self.sampling_rate = sampling_rate
self.timing_dims = timing_dims
self.min_duration = min_duration
self.max_duration = max_duration
self.metadata_conditioning = metadata_conditioning
super().__init__(**kwargs)
@classmethod
def from_configs(cls, prior_configs: list[JukeboxPriorConfig], vqvae_config: JukeboxVQVAEConfig, **kwargs):
"""
        Instantiate a [`JukeboxConfig`] (or a derived class) from a list of prior configurations and a VQVAE model
        configuration.
Returns:
[`JukeboxConfig`]: An instance of a configuration object
"""
prior_config_list = [config.to_dict() for config in prior_configs]
return cls(prior_config_list=prior_config_list, vqvae_config_dict=vqvae_config.to_dict(), **kwargs)
def to_dict(self):
result = super().to_dict()
result['prior_config_list'] = [config.to_dict() for config in result.pop('prior_configs')]
return result
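# Hedged illustration (not part of the original class) of the compression factors described in the
# docstring above: with a stride of 2 everywhere, downsampling exponents (5, 3) compress the audio
# by 2**5 = 32 at the first level and by 2**5 * 2**3 = 256 at the second level.
def _demo_compression_factors(downsampling=(5, 3), strides=(2, 2)):
    hop_lengths = []
    total = 1
    for down, stride in zip(downsampling, strides):
        total *= stride**down
        hop_lengths.append(total)
    return hop_lengths  # [32, 256] for the defaults above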
|
class JukeboxConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`JukeboxModel`].
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will
yield a similar configuration to that of
[openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.
    The downsampling and stride are used to determine the downsampling of the input sequence. For example, downsampling =
    (5, 3) and strides = (2, 2) will downsample the audio by 2**5 = 32 to get the first level of codes, and by 2**(5 + 3) =
    256 to get the second level of codes. This is mostly true for training the top level prior and the upsamplers.
Args:
vqvae_config (`JukeboxVQVAEConfig`, *optional*):
Configuration for the `JukeboxVQVAE` model.
prior_config_list (`List[JukeboxPriorConfig]`, *optional*):
List of the configs for each of the `JukeboxPrior` of the model. The original architecture uses 3 priors.
nb_priors (`int`, *optional*, defaults to 3):
            Number of prior models that will sequentially sample tokens. Each prior is a conditional autoregressive
            (decoder) model, apart from the top prior, which can include a lyric encoder. The available models were
            trained using a top prior and 2 upsampler priors.
sampling_rate (`int`, *optional*, defaults to 44100):
Sampling rate of the raw audio.
timing_dims (`int`, *optional*, defaults to 64):
Dimensions of the JukeboxRangeEmbedding layer which is equivalent to traditional positional embedding
layer. The timing embedding layer converts the absolute and relative position in the currently sampled
audio to a tensor of length `timing_dims` that will be added to the music tokens.
        min_duration (`int`, *optional*, defaults to 0):
            Minimum duration of the audio to generate.
        max_duration (`float`, *optional*, defaults to 600.0):
            Maximum duration of the audio to generate.
max_nb_genres (`int`, *optional*, defaults to 5):
Maximum number of genres that can be used to condition a single sample.
metadata_conditioning (`bool`, *optional*, defaults to `True`):
Whether or not to use metadata conditioning, corresponding to the artist, the genre and the min/maximum
duration.
Example:
```python
>>> from transformers import JukeboxModel, JukeboxConfig
>>> # Initializing a Jukebox configuration
>>> configuration = JukeboxConfig()
>>> # Initializing a model from the configuration
>>> model = JukeboxModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, vqvae_config=None, prior_config_list=None, nb_priors=3, sampling_rate=44100, timing_dims=64, min_duration=0, max_duration=600.0, max_nb_genres=5, metadata_conditioning=True, **kwargs):
pass
@classmethod
def from_configs(cls, prior_configs: list[JukeboxPriorConfig], vqvae_config: JukeboxVQVAEConfig, **kwargs):
'''
        Instantiate a [`JukeboxConfig`] (or a derived class) from a list of prior configurations and a VQVAE model
        configuration.
Returns:
[`JukeboxConfig`]: An instance of a configuration object
'''
pass
def to_dict(self):
pass
| 5
| 2
| 20
| 2
| 15
| 3
| 2
| 1.06
| 1
| 4
| 2
| 0
| 2
| 10
| 3
| 35
| 120
| 19
| 49
| 31
| 32
| 52
| 32
| 18
| 28
| 5
| 2
| 3
| 7
|
1,763
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/configuration_jukebox.py
|
transformers.models.deprecated.jukebox.configuration_jukebox.JukeboxPriorConfig
|
from typing import Union
from ....configuration_utils import PretrainedConfig
import os
class JukeboxPriorConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`JukeboxPrior`]. It is used to instantiate a
`JukeboxPrior` according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the top level prior from the
    [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
act_fn (`str`, *optional*, defaults to `"quick_gelu"`):
Activation function.
        alignment_head (`int`, *optional*, defaults to 2):
            Head that is responsible for the alignment between lyrics and music. Only used to compute the lyric to audio
            alignment.
        alignment_layer (`int`, *optional*, defaults to 68):
            Index of the layer that is responsible for the alignment between lyrics and music. Only used to compute the
            lyric to audio alignment.
attention_multiplier (`float`, *optional*, defaults to 0.25):
Multiplier coefficient used to define the hidden dimension of the attention layers. 0.25 means that
0.25*width of the model will be used.
attention_pattern (`str`, *optional*, defaults to `"enc_dec_with_lyrics"`):
            Which attention pattern to use for the decoder.
attn_dropout (`int`, *optional*, defaults to 0):
Dropout probability for the post-attention layer dropout in the decoder.
attn_res_scale (`bool`, *optional*, defaults to `False`):
Whether or not to scale the residuals in the attention conditioner block.
blocks (`int`, *optional*, defaults to 64):
Number of blocks used in the `block_attn`. A sequence of length seq_len is factored as `[blocks, seq_len //
blocks]` in the `JukeboxAttention` layer.
        conv_res_scale (`int`, *optional*):
            Whether or not to scale the residuals in the conditioner block. Since the top level prior does not have a
            conditioner, the default value is None and should not be modified.
num_layers (`int`, *optional*, defaults to 72):
Number of layers of the transformer architecture.
emb_dropout (`int`, *optional*, defaults to 0):
Embedding dropout used in the lyric decoder.
        encoder_config (`JukeboxPriorConfig`, *optional*):
Configuration of the encoder which models the prior on the lyrics.
encoder_loss_fraction (`float`, *optional*, defaults to 0.4):
Multiplication factor used in front of the lyric encoder loss.
hidden_size (`int`, *optional*, defaults to 2048):
Hidden dimension of the attention layers.
init_scale (`float`, *optional*, defaults to 0.2):
Initialization scales for the prior modules.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether or not the prior is an encoder-decoder model. In case it is not, and `nb_relevant_lyric_tokens` is
greater than 0, the `encoder` args should be specified for the lyric encoding.
mask (`bool`, *optional*, defaults to `False`):
Whether or not to mask the previous positions in the attention.
max_duration (`int`, *optional*, defaults to 600):
Maximum supported duration of the generated song in seconds.
max_nb_genres (`int`, *optional*, defaults to 1):
Maximum number of genres that can be used to condition the model.
merged_decoder (`bool`, *optional*, defaults to `True`):
Whether or not the decoder and the encoder inputs are merged. This is used for the separated
encoder-decoder architecture
        metadata_conditioning (`bool`, *optional*, defaults to `True`):
Whether or not to condition on the artist and genre metadata.
metadata_dims (`List[int]`, *optional*, defaults to `[604, 7898]`):
Number of genres and the number of artists that were used to train the embedding layers of the prior
models.
min_duration (`int`, *optional*, defaults to 0):
Minimum duration of the generated audio on which the model was trained.
mlp_multiplier (`float`, *optional*, defaults to 1.0):
Multiplier coefficient used to define the hidden dimension of the MLP layers. 0.25 means that 0.25*width of
the model will be used.
music_vocab_size (`int`, *optional*, defaults to 2048):
Number of different music tokens. Should be similar to the `JukeboxVQVAEConfig.nb_discrete_codes`.
n_ctx (`int`, *optional*, defaults to 6144):
Number of context tokens for each prior. The context tokens are the music tokens that are attended to when
generating music tokens.
n_heads (`int`, *optional*, defaults to 2):
Number of attention heads.
nb_relevant_lyric_tokens (`int`, *optional*, defaults to 384):
Number of lyric tokens that are used when sampling a single window of length `n_ctx`
res_conv_depth (`int`, *optional*, defaults to 3):
Depth of the `JukeboxDecoderConvBock` used to upsample the previously sampled audio in the
`JukeboxMusicTokenConditioner`.
res_conv_width (`int`, *optional*, defaults to 128):
Width of the `JukeboxDecoderConvBock` used to upsample the previously sampled audio in the
`JukeboxMusicTokenConditioner`.
res_convolution_multiplier (`int`, *optional*, defaults to 1):
Multiplier used to scale the `hidden_dim` of the `JukeboxResConv1DBlock`.
res_dilation_cycle (`int`, *optional*):
Dilation cycle used to define the `JukeboxMusicTokenConditioner`. Usually similar to the ones used in the
corresponding level of the VQVAE. The first prior does not use it as it is not conditioned on upper level
tokens.
        res_dilation_growth_rate (`int`, *optional*, defaults to 1):
            Dilation growth rate used between each convolutional block of the `JukeboxMusicTokenConditioner`.
res_downs_t (`List[int]`, *optional*, defaults to `[3, 2, 2]`):
Downsampling rates used in the audio conditioning network
res_strides_t (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
Striding used in the audio conditioning network
resid_dropout (`int`, *optional*, defaults to 0):
Residual dropout used in the attention pattern.
sampling_rate (`int`, *optional*, defaults to 44100):
Sampling rate used for training.
spread (`int`, *optional*):
Spread used in the `summary_spread_attention` pattern
timing_dims (`int`, *optional*, defaults to 64):
Dimension of the timing embedding.
zero_out (`bool`, *optional*, defaults to `False`):
Whether or not to zero out convolution weights when initializing.
"""
model_type = 'jukebox_prior'
attribute_map = {'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head'}
def __init__(self, act_fn='quick_gelu', level=0, alignment_head=2, alignment_layer=68, attention_multiplier=0.25, attention_pattern='enc_dec_with_lyrics', attn_dropout=0, attn_res_scale=False, blocks=64, conv_res_scale=None, num_layers=72, emb_dropout=0, encoder_config=None, encoder_loss_fraction=0.4, hidden_size=2048, init_scale=0.2, is_encoder_decoder=True, lyric_vocab_size=80, mask=False, max_duration=600, max_nb_genres=1, merged_decoder=True, metadata_conditioning=True, metadata_dims=[604, 7898], min_duration=0, mlp_multiplier=1.0, music_vocab_size=2048, n_ctx=6144, n_heads=2, nb_relevant_lyric_tokens=384, res_conv_depth=3, res_conv_width=128, res_convolution_multiplier=1, res_dilation_cycle=None, res_dilation_growth_rate=1, res_downs_t=[3, 2, 2], res_strides_t=[2, 2, 2], resid_dropout=0, sampling_rate=44100, spread=None, timing_dims=64, zero_out=False, **kwargs):
super().__init__(**kwargs)
self.act_fn = act_fn
self.alignment_head = alignment_head
self.alignment_layer = alignment_layer
self.attention_multiplier = attention_multiplier
self.attention_pattern = attention_pattern
self.attn_dropout = attn_dropout
self.attn_res_scale = attn_res_scale
self.blocks = blocks
self.conv_res_scale = conv_res_scale
self.num_layers = num_layers
self.emb_dropout = emb_dropout
self.music_vocab_size = music_vocab_size
if encoder_config is not None:
self.encoder_config = JukeboxPriorConfig(**encoder_config)
else:
self.encoder_config = None
self.encoder_loss_fraction = encoder_loss_fraction
self.init_scale = init_scale
self.is_encoder_decoder = is_encoder_decoder
self.lyric_vocab_size = lyric_vocab_size
self.level = level
self.mask = mask
self.max_duration = max_duration
self.max_nb_genres = max_nb_genres
self.merged_decoder = merged_decoder
self.metadata_conditioning = metadata_conditioning
self.metadata_dims = metadata_dims
self.min_duration = min_duration
self.mlp_multiplier = mlp_multiplier
self.n_ctx = n_ctx
self.n_heads = n_heads
self.nb_relevant_lyric_tokens = nb_relevant_lyric_tokens
self.res_conv_depth = res_conv_depth
self.res_conv_width = res_conv_width
self.res_convolution_multiplier = res_convolution_multiplier
self.res_dilation_cycle = res_dilation_cycle
self.res_dilation_growth_rate = res_dilation_growth_rate
self.res_downs_t = res_downs_t
self.res_strides_t = res_strides_t
self.resid_dropout = resid_dropout
self.sampling_rate = sampling_rate
self.spread = spread
self.timing_dims = timing_dims
self.hidden_size = hidden_size
self.zero_out = zero_out
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], level=0, **kwargs):
cls._set_token_in_kwargs(kwargs)
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if config_dict.get('model_type') == 'jukebox':
config_dict = config_dict[f'prior_{level}']
if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type):
logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(config_dict, **kwargs)
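# Hedged usage sketch (not part of the original class): `encoder_config` can be passed as a plain
# dict and is rebuilt into a nested JukeboxPriorConfig inside __init__. All values are arbitrary.
def _demo_nested_prior_config():
    prior_cfg = JukeboxPriorConfig(
        hidden_size=512,
        num_layers=4,
        encoder_config={"hidden_size": 128, "num_layers": 2, "n_heads": 1},
    )
    assert isinstance(prior_cfg.encoder_config, JukeboxPriorConfig)
    return prior_cfg.encoder_config.hidden_size  # 128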
|
class JukeboxPriorConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`JukeboxPrior`]. It is used to instantiate a
`JukeboxPrior` according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the top level prior from the
    [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
act_fn (`str`, *optional*, defaults to `"quick_gelu"`):
Activation function.
        alignment_head (`int`, *optional*, defaults to 2):
            Head that is responsible for the alignment between lyrics and music. Only used to compute the lyric to audio
            alignment.
        alignment_layer (`int`, *optional*, defaults to 68):
            Index of the layer that is responsible for the alignment between lyrics and music. Only used to compute the
            lyric to audio alignment.
attention_multiplier (`float`, *optional*, defaults to 0.25):
Multiplier coefficient used to define the hidden dimension of the attention layers. 0.25 means that
0.25*width of the model will be used.
attention_pattern (`str`, *optional*, defaults to `"enc_dec_with_lyrics"`):
            Which attention pattern to use for the decoder.
attn_dropout (`int`, *optional*, defaults to 0):
Dropout probability for the post-attention layer dropout in the decoder.
attn_res_scale (`bool`, *optional*, defaults to `False`):
Whether or not to scale the residuals in the attention conditioner block.
blocks (`int`, *optional*, defaults to 64):
Number of blocks used in the `block_attn`. A sequence of length seq_len is factored as `[blocks, seq_len //
blocks]` in the `JukeboxAttention` layer.
        conv_res_scale (`int`, *optional*):
            Whether or not to scale the residuals in the conditioner block. Since the top level prior does not have a
            conditioner, the default value is None and should not be modified.
num_layers (`int`, *optional*, defaults to 72):
Number of layers of the transformer architecture.
emb_dropout (`int`, *optional*, defaults to 0):
Embedding dropout used in the lyric decoder.
        encoder_config (`JukeboxPriorConfig`, *optional*):
Configuration of the encoder which models the prior on the lyrics.
encoder_loss_fraction (`float`, *optional*, defaults to 0.4):
Multiplication factor used in front of the lyric encoder loss.
hidden_size (`int`, *optional*, defaults to 2048):
Hidden dimension of the attention layers.
init_scale (`float`, *optional*, defaults to 0.2):
Initialization scales for the prior modules.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether or not the prior is an encoder-decoder model. In case it is not, and `nb_relevant_lyric_tokens` is
greater than 0, the `encoder` args should be specified for the lyric encoding.
mask (`bool`, *optional*, defaults to `False`):
Whether or not to mask the previous positions in the attention.
max_duration (`int`, *optional*, defaults to 600):
Maximum supported duration of the generated song in seconds.
max_nb_genres (`int`, *optional*, defaults to 1):
Maximum number of genres that can be used to condition the model.
merged_decoder (`bool`, *optional*, defaults to `True`):
Whether or not the decoder and the encoder inputs are merged. This is used for the separated
encoder-decoder architecture
        metadata_conditioning (`bool`, *optional*, defaults to `True`):
Whether or not to condition on the artist and genre metadata.
metadata_dims (`List[int]`, *optional*, defaults to `[604, 7898]`):
Number of genres and the number of artists that were used to train the embedding layers of the prior
models.
min_duration (`int`, *optional*, defaults to 0):
Minimum duration of the generated audio on which the model was trained.
mlp_multiplier (`float`, *optional*, defaults to 1.0):
Multiplier coefficient used to define the hidden dimension of the MLP layers. 0.25 means that 0.25*width of
the model will be used.
music_vocab_size (`int`, *optional*, defaults to 2048):
Number of different music tokens. Should be similar to the `JukeboxVQVAEConfig.nb_discrete_codes`.
n_ctx (`int`, *optional*, defaults to 6144):
Number of context tokens for each prior. The context tokens are the music tokens that are attended to when
generating music tokens.
n_heads (`int`, *optional*, defaults to 2):
Number of attention heads.
nb_relevant_lyric_tokens (`int`, *optional*, defaults to 384):
Number of lyric tokens that are used when sampling a single window of length `n_ctx`
res_conv_depth (`int`, *optional*, defaults to 3):
Depth of the `JukeboxDecoderConvBock` used to upsample the previously sampled audio in the
`JukeboxMusicTokenConditioner`.
res_conv_width (`int`, *optional*, defaults to 128):
Width of the `JukeboxDecoderConvBock` used to upsample the previously sampled audio in the
`JukeboxMusicTokenConditioner`.
res_convolution_multiplier (`int`, *optional*, defaults to 1):
Multiplier used to scale the `hidden_dim` of the `JukeboxResConv1DBlock`.
res_dilation_cycle (`int`, *optional*):
Dilation cycle used to define the `JukeboxMusicTokenConditioner`. Usually similar to the ones used in the
corresponding level of the VQVAE. The first prior does not use it as it is not conditioned on upper level
tokens.
        res_dilation_growth_rate (`int`, *optional*, defaults to 1):
            Dilation growth rate used between each convolutional block of the `JukeboxMusicTokenConditioner`.
res_downs_t (`List[int]`, *optional*, defaults to `[3, 2, 2]`):
Downsampling rates used in the audio conditioning network
res_strides_t (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
Striding used in the audio conditioning network
resid_dropout (`int`, *optional*, defaults to 0):
Residual dropout used in the attention pattern.
sampling_rate (`int`, *optional*, defaults to 44100):
Sampling rate used for training.
spread (`int`, *optional*):
Spread used in the `summary_spread_attention` pattern
timing_dims (`int`, *optional*, defaults to 64):
Dimension of the timing embedding.
zero_out (`bool`, *optional*, defaults to `False`):
Whether or not to zero out convolution weights when initializing.
'''
def __init__(self, act_fn='quick_gelu', level=0, alignment_head=2, alignment_layer=68, attention_multiplier=0.25, attention_pattern='enc_dec_with_lyrics', attn_dropout=0, attn_res_scale=False, blocks=64, conv_res_scale=None, num_layers=72, emb_dropout=0, encoder_config=None, encoder_loss_fraction=0.4, hidden_size=2048, init_scale=0.2, is_encoder_decoder=True, lyric_vocab_size=80, mask=False, max_duration=600, max_nb_genres=1, merged_decoder=True, metadata_conditioning=True, metadata_dims=[604, 7898], min_duration=0, mlp_multiplier=1.0, music_vocab_size=2048, n_ctx=6144, n_heads=2, nb_relevant_lyric_tokens=384, res_conv_depth=3, res_conv_width=128, res_convolution_multiplier=1, res_dilation_cycle=None, res_dilation_growth_rate=1, res_downs_t=[3, 2, 2], res_strides_t=[2, 2, 2], resid_dropout=0, sampling_rate=44100, spread=None, timing_dims=64, zero_out=False, **kwargs):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], level=0, **kwargs):
pass
| 4
| 1
| 55
| 2
| 52
| 1
| 3
| 0.95
| 1
| 2
| 0
| 0
| 1
| 42
| 2
| 34
| 227
| 11
| 111
| 96
| 60
| 105
| 56
| 48
| 53
| 3
| 2
| 1
| 5
|
1,764
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/configuration_jukebox.py
|
transformers.models.deprecated.jukebox.configuration_jukebox.JukeboxVQVAEConfig
|
from ....configuration_utils import PretrainedConfig
import os
from typing import Union
class JukeboxVQVAEConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`JukeboxVQVAE`]. It is used to instantiate a
`JukeboxVQVAE` according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the VQVAE from
[openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
act_fn (`str`, *optional*, defaults to `"relu"`):
Activation function of the model.
nb_discrete_codes (`int`, *optional*, defaults to 2048):
Number of codes of the VQVAE.
commit (`float`, *optional*, defaults to 0.02):
Commit loss multiplier.
conv_input_shape (`int`, *optional*, defaults to 1):
Number of audio channels.
conv_res_scale (`bool`, *optional*, defaults to `False`):
Whether or not to scale the residuals of the `JukeboxResConv1DBlock`.
embed_dim (`int`, *optional*, defaults to 64):
Embedding dimension of the codebook vectors.
hop_fraction (`List[int]`, *optional*, defaults to `[0.125, 0.5, 0.5]`):
Fraction of non-intersecting window used when continuing the sampling process.
        levels (`int`, *optional*, defaults to 3):
            Number of hierarchical levels that are used in the VQVAE.
lmu (`float`, *optional*, defaults to 0.99):
Used in the codebook update, exponential moving average coefficient. For more detail refer to Appendix A.1
of the original [VQVAE paper](https://huggingface.co/papers/1711.00937v2.pdf)
multipliers (`List[int]`, *optional*, defaults to `[2, 1, 1]`):
Depth and width multipliers used for each level. Used on the `res_conv_width` and `res_conv_depth`
res_conv_depth (`int`, *optional*, defaults to 4):
Depth of the encoder and decoder block. If no `multipliers` are used, this is the same for each level.
res_conv_width (`int`, *optional*, defaults to 32):
Width of the encoder and decoder block. If no `multipliers` are used, this is the same for each level.
res_convolution_multiplier (`int`, *optional*, defaults to 1):
Scaling factor of the hidden dimension used in the `JukeboxResConv1DBlock`.
res_dilation_cycle (`int`, *optional*):
Dilation cycle value used in the `JukeboxResnet`. If an int is used, each new Conv1 block will have a depth
reduced by a power of `res_dilation_cycle`.
res_dilation_growth_rate (`int`, *optional*, defaults to 3):
Resnet dilation growth rate used in the VQVAE (dilation_growth_rate ** depth)
res_downs_t (`List[int]`, *optional*, defaults to `[3, 2, 2]`):
Downsampling rate for each level of the hierarchical VQ-VAE.
res_strides_t (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
Stride used for each level of the hierarchical VQ-VAE.
sample_length (`int`, *optional*, defaults to 1058304):
Provides the max input shape of the VQVAE. Is used to compute the input shape of each level.
init_scale (`float`, *optional*, defaults to 0.2):
Initialization scale.
zero_out (`bool`, *optional*, defaults to `False`):
Whether or not to zero out convolution weights when initializing.
"""
model_type = 'jukebox_vqvae'
def __init__(self, act_fn='relu', nb_discrete_codes=2048, commit=0.02, conv_input_shape=1, conv_res_scale=False, embed_dim=64, hop_fraction=[0.125, 0.5, 0.5], levels=3, lmu=0.99, multipliers=[2, 1, 1], res_conv_depth=4, res_conv_width=32, res_convolution_multiplier=1, res_dilation_cycle=None, res_dilation_growth_rate=3, res_downs_t=[3, 2, 2], res_strides_t=[2, 2, 2], sample_length=1058304, init_scale=0.2, zero_out=False, **kwargs):
super().__init__(**kwargs)
self.hop_fraction = hop_fraction
self.conv_input_shape = conv_input_shape
self.sample_length = sample_length
self.levels = levels
self.embed_dim = embed_dim
self.nb_discrete_codes = nb_discrete_codes
self.res_conv_width = res_conv_width
self.res_conv_depth = res_conv_depth
self.res_convolution_multiplier = res_convolution_multiplier
self.res_dilation_growth_rate = res_dilation_growth_rate
self.res_dilation_cycle = res_dilation_cycle
self.multipliers = multipliers
self.res_downs_t = res_downs_t
self.res_strides_t = res_strides_t
self.lmu = lmu
self.commit = commit
self.conv_res_scale = conv_res_scale
self.act_fn = act_fn
self.init_scale = init_scale
self.zero_out = zero_out
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
cls._set_token_in_kwargs(kwargs)
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if config_dict.get('model_type') == 'jukebox':
config_dict = config_dict['vqvae_config']
if 'model_type' in config_dict and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type):
logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(config_dict, **kwargs)
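# Hedged usage sketch (not part of the original class): JukeboxConfig (defined in the same file)
# accepts the VQVAE configuration as a plain dict and rebuilds a JukeboxVQVAEConfig from it.
# Explicit (empty) prior config dicts are passed to avoid the default-value logging path.
def _demo_nested_vqvae_config():
    vqvae_cfg = JukeboxVQVAEConfig(levels=3, nb_discrete_codes=1024)
    full_cfg = JukeboxConfig(
        vqvae_config=vqvae_cfg.to_dict(),
        prior_config_list=[{}, {}, {}],
    )
    assert full_cfg.vqvae_config.nb_discrete_codes == 1024
    return full_cfg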
|
class JukeboxVQVAEConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`JukeboxVQVAE`]. It is used to instantiate a
`JukeboxVQVAE` according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the VQVAE from
[openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
act_fn (`str`, *optional*, defaults to `"relu"`):
Activation function of the model.
nb_discrete_codes (`int`, *optional*, defaults to 2048):
Number of codes of the VQVAE.
commit (`float`, *optional*, defaults to 0.02):
Commit loss multiplier.
conv_input_shape (`int`, *optional*, defaults to 1):
Number of audio channels.
conv_res_scale (`bool`, *optional*, defaults to `False`):
Whether or not to scale the residuals of the `JukeboxResConv1DBlock`.
embed_dim (`int`, *optional*, defaults to 64):
Embedding dimension of the codebook vectors.
hop_fraction (`List[int]`, *optional*, defaults to `[0.125, 0.5, 0.5]`):
Fraction of non-intersecting window used when continuing the sampling process.
        levels (`int`, *optional*, defaults to 3):
            Number of hierarchical levels that are used in the VQVAE.
lmu (`float`, *optional*, defaults to 0.99):
Used in the codebook update, exponential moving average coefficient. For more detail refer to Appendix A.1
of the original [VQVAE paper](https://huggingface.co/papers/1711.00937v2.pdf)
multipliers (`List[int]`, *optional*, defaults to `[2, 1, 1]`):
Depth and width multipliers used for each level. Used on the `res_conv_width` and `res_conv_depth`
res_conv_depth (`int`, *optional*, defaults to 4):
Depth of the encoder and decoder block. If no `multipliers` are used, this is the same for each level.
res_conv_width (`int`, *optional*, defaults to 32):
Width of the encoder and decoder block. If no `multipliers` are used, this is the same for each level.
res_convolution_multiplier (`int`, *optional*, defaults to 1):
Scaling factor of the hidden dimension used in the `JukeboxResConv1DBlock`.
res_dilation_cycle (`int`, *optional*):
Dilation cycle value used in the `JukeboxResnet`. If an int is used, each new Conv1 block will have a depth
reduced by a power of `res_dilation_cycle`.
res_dilation_growth_rate (`int`, *optional*, defaults to 3):
Resnet dilation growth rate used in the VQVAE (dilation_growth_rate ** depth)
res_downs_t (`List[int]`, *optional*, defaults to `[3, 2, 2]`):
Downsampling rate for each level of the hierarchical VQ-VAE.
res_strides_t (`List[int]`, *optional*, defaults to `[2, 2, 2]`):
Stride used for each level of the hierarchical VQ-VAE.
sample_length (`int`, *optional*, defaults to 1058304):
Provides the max input shape of the VQVAE. Is used to compute the input shape of each level.
init_scale (`float`, *optional*, defaults to 0.2):
Initialization scale.
zero_out (`bool`, *optional*, defaults to `False`):
Whether or not to zero out convolution weights when initializing.
'''
def __init__(self, act_fn='relu', nb_discrete_codes=2048, commit=0.02, conv_input_shape=1, conv_res_scale=False, embed_dim=64, hop_fraction=[0.125, 0.5, 0.5], levels=3, lmu=0.99, multipliers=[2, 1, 1], res_conv_depth=4, res_conv_width=32, res_convolution_multiplier=1, res_dilation_cycle=None, res_dilation_growth_rate=3, res_downs_t=[3, 2, 2], res_strides_t=[2, 2, 2], sample_length=1058304, init_scale=0.2, zero_out=False, **kwargs):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
pass
| 4
| 1
| 31
| 3
| 28
| 1
| 2
| 0.91
| 1
| 2
| 0
| 0
| 1
| 20
| 2
| 34
| 121
| 10
| 58
| 49
| 31
| 53
| 31
| 25
| 28
| 3
| 2
| 1
| 4
|
1,765
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxAttention
|
from torch import nn
import torch.nn.functional as F
import torch
class JukeboxAttention(nn.Module):
def __init__(self, config, n_ctx, attn_func='dense_attn'):
super().__init__()
self.embed_dim = config.hidden_size
self.n_heads = config.n_heads
self.dropout = config.attn_dropout
hidden_dim = int(config.attention_multiplier * self.embed_dim)
self.head_dim = hidden_dim // config.n_heads
self.n_ctx = n_ctx
self.hidden_dim = hidden_dim
self.scale = self.head_dim ** (-0.25)
self.mask = config.mask
if attn_func == 'cross_attention':
self.c_attn = JukeboxConv1D(self.embed_dim, hidden_dim)
self.c_enc_kv = JukeboxConv1D(self.embed_dim, hidden_dim * 2)
else:
self.c_attn = JukeboxConv1D(self.embed_dim, hidden_dim * 3)
self.c_proj = JukeboxConv1D(hidden_dim, self.embed_dim)
self.attn_dropout = nn.Dropout(config.attn_dropout)
self.resid_dropout = nn.Dropout(config.resid_dropout)
self.attn_func = attn_func
if attn_func == 'cross_attention':
self.qkv = self.decode_qkv
elif attn_func == 'prime_attn':
self.qkv = self.prime_qkv
else:
self.qkv = self.factored_qkv
ATTENTION_MAP = {'dense_attn': (self.dense_attn, 'autoregressive'), 'block_attn': (self.block_attn, 'autoregressive'), 'transpose_block_attn': (self.transpose_block_attn, 'autoregressive'), 'prev_block_attn': (self.prev_block_attn, None), 'summary_attn': (self.summary_attn, 'summary'), 'summary_spread_attn': (self.summary_spread_attn, 'summary'), 'cross_attention': (self.dense_attn, None), 'prime_attn': (self.prime_attn, 'prime')}
self.attn, self.attn_mask = ATTENTION_MAP[attn_func]
self.blocks = config.blocks
self.spread = config.spread
if self.blocks is not None:
self.block_ctx = self.n_ctx // self.blocks
self.sample_t = 0
self.cache = {}
self.encoder_len = config.nb_relevant_lyric_tokens
self.record_attn = False
def _attn(self, query_states, key_states, value_states, sample):
scale = self.scale
if self.training:
attention_weight = torch.matmul(query_states * scale, key_states * scale)
else:
attention_weight = torch.matmul(query_states, key_states)
attention_weight.mul_(scale * scale)
attn_weight_type = attention_weight.dtype
attention_weight = attention_weight.float()
if self.mask:
mask = get_mask(self.attn_mask, query_states.size(-2), key_states.size(-1), self.blocks, self.spread, attention_weight.device, sample, self.sample_t)
if mask is not None:
attention_weight = attention_weight * mask + -1000000000.0 * (1 - mask)
attention_prob = F.softmax(attention_weight, dim=-1).type(attn_weight_type)
if self.record_attn:
self.attention_prob = attention_prob
if self.attn_func == 'prime_attn':
self.attention_prob = self.attention_prob[:, :, self.encoder_len:, :self.encoder_len]
attention_prob = self.attn_dropout(attention_prob)
context_states = torch.matmul(attention_prob, value_states)
return context_states
def merge_heads(self, hidden_states):
hidden_states = hidden_states.permute(0, 2, 1, 3).contiguous()
new_hidden_states_shape = (*hidden_states.size()[:-2], hidden_states.size(-2) * hidden_states.size(-1))
return hidden_states.view(*new_hidden_states_shape)
def split_heads(self, hidden_states, is_key=False):
new_hidden_states_shape = (*hidden_states.size()[:-1], self.n_heads, hidden_states.size(-1) // self.n_heads)
hidden_states = hidden_states.view(*new_hidden_states_shape)
if is_key:
return hidden_states.permute(0, 2, 3, 1)
else:
return hidden_states.permute(0, 2, 1, 3)
def dense_attn(self, query, key, value, sample):
query = self.split_heads(query)
key = self.split_heads(key, is_key=True)
value = self.split_heads(value)
context_states = self._attn(query, key, value, sample)
context_states = self.merge_heads(context_states)
return context_states
def block_attn(self, query, key, value, sample):
block_ctx = self.block_ctx
batch_size, seq_len, embed_dim = value.shape
if sample:
return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim)
else:
query_length = query.shape[1]
query = query.view(batch_size * query_length // block_ctx, block_ctx, embed_dim)
if query_length < seq_len:
seq_len = query_length
key = key[:, -seq_len:].contiguous()
value = value[:, -seq_len:].contiguous()
key = key.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim)
value = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim)
return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim)
def transpose_block_attn(self, query, key, value, sample):
block_ctx = self.block_ctx
batch_size, seq_len, embed_dim = value.shape
if sample:
block_len = (seq_len - 1) % block_ctx
key = key[:, block_len::block_ctx, :]
value = value[:, block_len::block_ctx, :]
return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim)
else:
query_length = query.shape[1]
query = query.view(batch_size, query_length // block_ctx, block_ctx, embed_dim)
query = query.transpose(1, 2).contiguous()
query = query.view(batch_size * block_ctx, query_length // block_ctx, embed_dim)
key = key.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)
key = key.transpose(1, 2).contiguous()
key = key.view(batch_size * block_ctx, seq_len // block_ctx, embed_dim)
value = value.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)
value = value.transpose(1, 2).contiguous()
value = value.view(batch_size * block_ctx, seq_len // block_ctx, embed_dim)
block_attn = self.dense_attn(query, key, value, sample)
block_attn = block_attn.view(batch_size, block_ctx, query_length // block_ctx, embed_dim)
block_attn = block_attn.transpose(1, 2).contiguous()
block_attn = block_attn.view(batch_size, query_length, embed_dim)
return block_attn
def prev_block_attn(self, query, key, value, sample):
block_ctx = self.block_ctx
batch_size, seq_len, embed_dim = value.shape
if sample:
block = (seq_len - 1) // block_ctx
prev_l = (block - 1) * block_ctx
if block > 0:
key = key[:, prev_l:prev_l + block_ctx, :]
value = value[:, prev_l:prev_l + block_ctx, :]
else:
key = torch.zeros(batch_size, block_ctx, embed_dim, device=query.device, dtype=query.dtype)
value = torch.zeros(batch_size, block_ctx, embed_dim, device=query.device, dtype=query.dtype)
return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim)
else:
query_length = query.shape[1]
query = query.view(batch_size * query_length // block_ctx, block_ctx, embed_dim)
key = key.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)[:, :-1, :, :]
key = torch.nn.functional.pad(key, (0, 0, 0, 0, 1, 0))
key = key.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim)
value = value.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)[:, :-1, :, :]
value = torch.nn.functional.pad(value, (0, 0, 0, 0, 1, 0))
value = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim)
if query_length < seq_len:
nb_query_blocks = query_length // block_ctx
nb_key_blocks = seq_len // block_ctx
seq_len = query_length
key = key.view(batch_size, nb_key_blocks, block_ctx, embed_dim)[:, -nb_query_blocks:]
key = key.contiguous().view(batch_size * nb_query_blocks, block_ctx, embed_dim)
value = value.view(batch_size, nb_key_blocks, block_ctx, embed_dim)[:, -nb_query_blocks:]
value = value.contiguous().view(batch_size * nb_query_blocks, block_ctx, embed_dim)
return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim)
def summary_attn(self, query, key, value, sample):
blocks = self.blocks
block_ctx = self.block_ctx
batch_size, seq_len, embed_dim = value.shape
if sample:
key = key[:, block_ctx - 1:blocks * block_ctx - 1:block_ctx, :]
key = torch.nn.functional.pad(key, (0, 0, 1, 0))
value = value[:, block_ctx - 1:blocks * block_ctx - 1:block_ctx, :]
value = torch.nn.functional.pad(value, (0, 0, 1, 0))
return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim)
else:
key = key.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -1, :]
key = torch.nn.functional.pad(key, (0, 0, 1, 0))
value = value.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -1, :]
value = torch.nn.functional.pad(value, (0, 0, 1, 0))
return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim)
def summary_spread_attn(self, query, key, value, sample):
blocks = self.blocks
spread = self.spread
batch_size, seq_len, embed_dim = value.shape
if sample:
raise NotImplementedError
else:
key = key.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -spread:, :]
key = torch.nn.functional.pad(key, (0, 0, 0, 0, 1, 0)).contiguous()
key = key.view(batch_size, blocks * spread, embed_dim)
value = value.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -spread:, :]
value = torch.nn.functional.pad(value, (0, 0, 0, 0, 1, 0)).contiguous()
value = value.view(batch_size, blocks * spread, embed_dim)
return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim)
def prime_attn(self, query, key, value, sample):
encoder_len = self._encoder_len
key = key[:, :encoder_len]
value = value[:, :encoder_len]
return self.dense_attn(query, key, value, sample)
def factored_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False):
curr_ctx = hidden_states.shape[1]
if last_encoder_hidden_states is not None:
raise TypeError('last_encoder_hidden_states should be None')
query, key, value = hidden_states.chunk(3, dim=2)
if sample:
self.sample_t += curr_ctx
key, value = self._append_cache(key, value)
l_cache = self._suff_cache_len()
if self._cache_len() > l_cache:
self._slice_cache(-l_cache)
if curr_ctx > 1:
if self.attn_func != 'dense_attn':
query = self._pad_to_block_ctx(query, query=True)
key = self._pad_to_block_ctx(key)
value = self._pad_to_block_ctx(value)
sample = False
else:
key = self.cache['key']
value = self.cache['value']
return (query, key, value, sample)
def prime_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False):
curr_ctx = hidden_states.shape[1]
if last_encoder_hidden_states is not None:
raise TypeError('last_encoder_hidden_states should be None')
query, key, value = hidden_states.chunk(3, dim=2)
if sample:
if self._cache_len() < self._encoder_len:
self._append_cache(key, value)
if self._cache_len() > self._encoder_len:
self._slice_cache(0, self._encoder_len)
key, value = (self.cache['key'], self.cache['value'])
self.sample_t += curr_ctx
return (query, key, value, sample)
def decode_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False):
curr_ctx = hidden_states.shape[1]
query = hidden_states
if sample:
if self.sample_t == 0:
self.cache['key'], self.cache['value'] = self.c_enc_kv(last_encoder_hidden_states.type_as(hidden_states)).chunk(2, dim=2)
key, value = (self.cache['key'], self.cache['value'])
self.sample_t += curr_ctx
else:
key, value = self.c_enc_kv(last_encoder_hidden_states.type_as(hidden_states)).chunk(2, dim=2)
return (query, key, value, sample)
def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False):
curr_ctx = hidden_states.shape[1]
hidden_states = self.c_attn(hidden_states)
query, key, value, sample = self.qkv(hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=sample)
attention_scores = self.attn(query, key, value, sample)
if attention_scores.shape[1] != curr_ctx:
offset = self._offset(curr_ctx)
attention_scores = attention_scores[:, offset:offset + curr_ctx, :].contiguous()
attention_scores = self.c_proj(attention_scores)
return self.resid_dropout(attention_scores)
@property
def _encoder_len(self):
encoder_len = self.encoder_len
encoder_blocks = encoder_len // self.blocks + 1
return encoder_blocks * self.blocks
def _offset(self, curr_ctx):
if self.attn_func == 'dense_attn':
return 0
return (self.sample_t - curr_ctx) % self.block_ctx
def _pad_to_block_ctx(self, hidden_states, query=False):
seq_len = hidden_states.shape[1]
offset = self._offset(seq_len) if query else 0
n_blocks = (seq_len + offset + self.block_ctx - 1) // self.block_ctx
pad = n_blocks * self.block_ctx - seq_len - offset
if pad == 0 and offset == 0:
return hidden_states
else:
return F.pad(hidden_states, (0, 0, offset, pad))
def _cache_len(self):
return 0 if 'key' not in self.cache else self.cache['key'].shape[1]
def _suff_cache_len(self):
"""
Precondition:
key and value are appended with the current context and self.sample_t reflects the 1-indexed sample
location in the context.
"""
previous_block_length = (self.sample_t - 1) % self.block_ctx + 1 + self.block_ctx
REQUIRED_CACHE_LEN = {'dense_attn': self.sample_t, 'block_attn': (self.sample_t - 1) % self.block_ctx + 1, 'transpose_block_attn': self.sample_t, 'prev_block_attn': self.sample_t if self.sample_t <= self.block_ctx else previous_block_length, 'cross_attn': self.encoder_len, 'prime_attn': min(self.sample_t, self._encoder_len)}
return REQUIRED_CACHE_LEN[self.attn_func]
def _slice_cache(self, start, end=None):
self.cache['key'] = self.cache['key'][:, start:end]
self.cache['value'] = self.cache['value'][:, start:end]
def _append_cache(self, key, value):
if 'key' not in self.cache:
self.cache['key'] = key
self.cache['value'] = value
else:
old_key, old_value = (key, value)
key = torch.cat([self.cache['key'], old_key], dim=1)
value = torch.cat([self.cache['value'], old_value], dim=1)
del self.cache['key']
del self.cache['value']
del old_key
del old_value
self.cache['key'] = key
self.cache['value'] = value
return (self.cache['key'], self.cache['value'])
def del_cache(self):
self.sample_t = 0
if 'key' in self.cache:
del self.cache['key']
if 'value' in self.cache:
del self.cache['value']
self.cache = {}
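# Hedged illustration (not part of the original class): the core reshape behind block_attn above,
# which factors a sequence into independent windows of block_ctx tokens so that dense attention
# only runs inside each window. All sizes here are arbitrary.
def _demo_block_reshape():
    batch_size, seq_len, embed_dim, block_ctx = 2, 12, 4, 3
    value = torch.randn(batch_size, seq_len, embed_dim)
    # (batch, seq_len, dim) -> (batch * seq_len // block_ctx, block_ctx, dim)
    blocked = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim)
    assert blocked.shape == (8, 3, 4)  # 2 * 12 // 3 = 8 independent attention windows
    # The inverse view restores the original layout unchanged.
    restored = blocked.view(batch_size, seq_len, embed_dim)
    assert torch.equal(restored, value)
    return restored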
|
class JukeboxAttention(nn.Module):
def __init__(self, config, n_ctx, attn_func='dense_attn'):
pass
def _attn(self, query_states, key_states, value_states, sample):
pass
def merge_heads(self, hidden_states):
pass
def split_heads(self, hidden_states, is_key=False):
pass
def dense_attn(self, query, key, value, sample):
pass
def block_attn(self, query, key, value, sample):
pass
def transpose_block_attn(self, query, key, value, sample):
pass
def prev_block_attn(self, query, key, value, sample):
pass
def summary_attn(self, query, key, value, sample):
pass
def summary_spread_attn(self, query, key, value, sample):
pass
def prime_attn(self, query, key, value, sample):
pass
def factored_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False):
pass
def prime_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False):
pass
def decode_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False):
pass
def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False):
pass
@property
def _encoder_len(self):
pass
def _offset(self, curr_ctx):
pass
def _pad_to_block_ctx(self, hidden_states, query=False):
pass
def _cache_len(self):
pass
def _suff_cache_len(self):
'''
Precondition:
key and value are appended with the current context and self.sample_t reflects the 1-indexed sample
location in the context.
'''
pass
def _slice_cache(self, start, end=None):
pass
def _append_cache(self, key, value):
pass
def del_cache(self):
pass
| 25 | 1 | 15 | 1 | 14 | 1 | 3 | 0.06 | 1 | 5 | 1 | 0 | 23 | 25 | 23 | 33 | 372 | 45 | 318 | 103 | 293 | 19 | 269 | 102 | 245 | 6 | 1 | 3 | 61
|
1,766
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxBlock
|
from torch import nn
class JukeboxBlock(nn.Module):
def __init__(self, config, n_ctx, attn_func='dense_attn'):
super().__init__()
self.width = config.hidden_size
self.attn = JukeboxAttention(config, n_ctx, attn_func=attn_func)
self.layer_norm_0 = JukeboxLayerNorm(config.hidden_size)
self.mlp = JukeboxMLP(config)
self.layer_norm_1 = JukeboxLayerNorm(config.hidden_size)
self.res_scale = 1.0 / config.num_layers if config.attn_res_scale else 1.0
self.attn_func = attn_func
def forward(self, hidden_states, last_encoder_hidden_states, sample=False):
residuals = hidden_states
hidden_states = self.layer_norm_0(hidden_states)
hidden_states = self.attn(hidden_states, last_encoder_hidden_states, sample)
output_states = self.layer_norm_1(residuals + hidden_states)
output_states = self.mlp(output_states)
if self.res_scale == 1.0:
output = residuals + hidden_states + output_states
else:
output = residuals + self.res_scale * (hidden_states + output_states)
return output
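The residual scaling in `forward` is easiest to see on scalars: when `attn_res_scale` is enabled, each block contributes only `res_scale = 1 / num_layers` of its attention and MLP outputs, so the residual stream keeps roughly the same scale across a deep stack. A toy illustration with plain floats standing in for tensors (all numbers made up):

```python
# Toy scalar version of the residual update in JukeboxBlock.forward
# when config.attn_res_scale is set.
num_layers = 72
res_scale = 1.0 / num_layers

residual, attn_out, mlp_out = 1.0, 0.5, 0.25         # stand-ins for tensors
output = residual + res_scale * (attn_out + mlp_out)
print(round(output, 4))                              # 1.0104
```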
|
class JukeboxBlock(nn.Module):
def __init__(self, config, n_ctx, attn_func='dense_attn'):
pass
def forward(self, hidden_states, last_encoder_hidden_states, sample=False):
pass
| 3 | 0 | 11 | 1 | 10 | 0 | 2 | 0 | 1 | 4 | 3 | 0 | 2 | 7 | 2 | 12 | 24 | 3 | 21 | 13 | 18 | 0 | 20 | 13 | 17 | 2 | 1 | 1 | 4
|
1,767
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxBottleneck
|
from torch import nn
class JukeboxBottleneck(nn.Module):
def __init__(self, config, levels):
super().__init__()
self.levels = levels
self.level_blocks = nn.ModuleList()
for level in range(self.levels):
self.level_blocks.append(JukeboxBottleneckBlock(config))
def encode(self, raw_audio):
music_tokens = [level_block.encode(hidden_states) for level_block, hidden_states in zip(self.level_blocks, raw_audio)]
return music_tokens
def decode(self, music_tokens, start_level=0, end_level=None):
if end_level is None:
end_level = self.levels
quantised_audio = [level_block.decode(z) for level_block, z in zip(self.level_blocks[start_level:end_level], music_tokens)]
return quantised_audio
def forward(self, input_audio):
music_tokens, quantised_states, commit_losses, metrics = ([], [], [], [])
for level in range(self.levels):
level_block = self.level_blocks[-level - 1]
hidden_states = input_audio[level]
sampled_tokens, quantised_state, commit_loss, metric = level_block(hidden_states, update_codebook=self.training)
music_tokens.append(sampled_tokens)
if not self.training:
quantised_state = quantised_state.detach()
quantised_states.append(quantised_state)
commit_losses.append(commit_loss)
if self.training:
metrics.append(metric)
return (music_tokens, quantised_states, commit_losses, metrics)
|
class JukeboxBottleneck(nn.Module):
def __init__(self, config, levels):
pass
def encode(self, raw_audio):
pass
def decode(self, music_tokens, start_level=0, end_level=None):
pass
def forward(self, input_audio):
pass
| 5 | 0 | 9 | 0 | 9 | 1 | 2 | 0.06 | 1 | 4 | 1 | 0 | 4 | 2 | 4 | 14 | 40 | 3 | 35 | 15 | 30 | 2 | 29 | 15 | 24 | 4 | 1 | 2 | 9
|
1,768
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxBottleneckBlock
|
from torch import nn
from .configuration_jukebox import ATTENTION_PATTERNS, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig
import numpy as np
import torch
import torch.nn.functional as F
class JukeboxBottleneckBlock(nn.Module):
def __init__(self, config: JukeboxVQVAEConfig):
super().__init__()
self.nb_discrete_codes = config.nb_discrete_codes
self.codebook_width = config.embed_dim
self.mu = config.lmu
self.threshold = 1.0
self.init = False
self.codebook_sum = None
self.codebook_elem = None
self.register_buffer('codebook', torch.zeros(self.nb_discrete_codes, self.codebook_width))
def _tile(self, hidden_states):
dim, embed_width = hidden_states.shape
if dim < self.nb_discrete_codes:
n_repeats = (self.nb_discrete_codes + dim - 1) // dim
std = 0.01 / np.sqrt(embed_width)
hidden_states = hidden_states.repeat(n_repeats, 1)
hidden_states = hidden_states + torch.randn_like(hidden_states) * std
return hidden_states
def init_codebook(self, hidden_states):
nb_discrete_codes = self.nb_discrete_codes
self.init = True
codes = self._tile(hidden_states)
self.codebook = codes[torch.randperm(codes.shape[0])][:nb_discrete_codes]
self.codebook_sum = self.codebook
self.codebook_elem = torch.ones(nb_discrete_codes, device=self.codebook.device)
def update_codebook(self, hidden_states, latent_states):
mu, codebook_width, nb_discrete_codes = (self.mu, self.codebook_width, self.nb_discrete_codes)
with torch.no_grad():
latent_states_onehot = torch.zeros(nb_discrete_codes, hidden_states.shape[0], device=hidden_states.device)
latent_states_onehot.scatter_(0, latent_states.view(1, hidden_states.shape[0]), 1)
_codebook_sum = torch.matmul(latent_states_onehot, hidden_states)
_codebook_elem = latent_states_onehot.sum(dim=-1)
codes = self._tile(hidden_states)
_random_codebook = codes[torch.randperm(codes.shape[0])][:nb_discrete_codes]
old_codebook = self.codebook
self.codebook_sum = mu * self.codebook_sum + (1.0 - mu) * _codebook_sum
self.codebook_elem = mu * self.codebook_elem + (1.0 - mu) * _codebook_elem
usage = (self.codebook_elem.view(nb_discrete_codes, 1) >= self.threshold).float()
norm_code = self.codebook_sum.view(nb_discrete_codes, codebook_width) / self.codebook_elem.view(nb_discrete_codes, 1)
self.codebook = usage * norm_code + (1 - usage) * _random_codebook
_codebook_prob = _codebook_elem / torch.sum(_codebook_elem)
entropy = -torch.sum(_codebook_prob * torch.log(_codebook_prob + 1e-08))
used_curr = (_codebook_elem >= self.threshold).sum()
usage = torch.sum(usage)
dk = torch.linalg.norm(self.codebook - old_codebook) / np.sqrt(np.prod(old_codebook.shape))
return {'entropy': entropy, 'used_curr': used_curr, 'usage': usage, 'dk': dk}
def preprocess(self, hidden_states):
hidden_states = hidden_states.permute(0, 2, 1).contiguous()
hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
if hidden_states.shape[-1] == self.codebook_width:
prenorm = torch.linalg.norm(hidden_states - torch.mean(hidden_states)) / np.sqrt(np.prod(hidden_states.shape))
elif hidden_states.shape[-1] == 2 * self.codebook_width:
x1, x2 = (hidden_states[..., :self.codebook_width], hidden_states[..., self.codebook_width:])
prenorm = torch.linalg.norm(x1 - torch.mean(x1)) / np.sqrt(np.prod(x1.shape)) + torch.linalg.norm(x2 - torch.mean(x2)) / np.sqrt(np.prod(x2.shape))
hidden_states = x1 + x2
return (hidden_states, prenorm)
def postprocess(self, latent_states, dequantised_states, x_shape):
batch_size, time = x_shape
dequantised_states = dequantised_states.view(batch_size, time, -1).permute(0, 2, 1).contiguous()
latent_states = latent_states.view(batch_size, time)
return (latent_states, dequantised_states)
def quantise(self, latent_states):
codebook_weights = self.codebook.t()
distance = torch.sum(latent_states ** 2, dim=-1, keepdim=True) - 2 * torch.matmul(latent_states, codebook_weights) + torch.sum(codebook_weights ** 2, dim=0, keepdim=True)
min_distance, music_tokens = torch.min(distance, dim=-1)
fit = torch.mean(min_distance)
return (music_tokens, fit)
def dequantise(self, music_tokens):
dequantised_states = F.embedding(music_tokens, self.codebook)
return dequantised_states
def encode(self, latent_states):
samples, _, seq_len = latent_states.shape
latent_states, _ = self.preprocess(latent_states)
music_tokens, _ = self.quantise(latent_states)
music_tokens = music_tokens.view(samples, seq_len)
return music_tokens
def decode(self, music_tokens):
samples, seq_len = music_tokens.shape
dequantised_states = self.dequantise(music_tokens)
dequantised_states = dequantised_states.view(samples, seq_len, self.codebook_width).permute(0, 2, 1).contiguous()
return dequantised_states
def forward(self, hidden_states, update_codebook=True):
samples, _, seq_len = hidden_states.shape
hidden_states, prenorm = self.preprocess(hidden_states)
if update_codebook and (not self.init):
self.init_codebook(hidden_states)
music_tokens, fit = self.quantise(hidden_states)
dequantised_states = self.dequantise(music_tokens)
if update_codebook:
update_metrics = self.update_codebook(hidden_states, music_tokens)
else:
update_metrics = {}
commit_loss = torch.linalg.norm(dequantised_states.detach() - hidden_states) ** 2 / np.prod(hidden_states.shape)
dequantised_states = hidden_states + (dequantised_states - hidden_states).detach()
music_tokens, dequantised_states = self.postprocess(music_tokens, dequantised_states, (samples, seq_len))
return (music_tokens, dequantised_states, commit_loss, dict(fit=fit, pn=prenorm, **update_metrics))
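`quantise` finds the nearest codebook entry for each latent by expanding the squared distance, `||x - c||^2 = ||x||^2 - 2 x·c + ||c||^2`, instead of materialising pairwise differences. A small self-contained check with random tensors (sizes arbitrary, not the real codebook) confirms the expanded form selects the same codes as a direct `torch.cdist` argmin:

```python
import torch

# Hypothetical sizes: 5 latent vectors, a codebook of 16 codes of width 8.
torch.manual_seed(0)
latents = torch.randn(5, 8)
codebook = torch.randn(16, 8)

# Expanded form used in JukeboxBottleneckBlock.quantise.
w = codebook.t()
distance = (
    torch.sum(latents**2, dim=-1, keepdim=True)
    - 2 * latents @ w
    + torch.sum(w**2, dim=0, keepdim=True)
)
codes_expanded = distance.argmin(dim=-1)

# Direct pairwise distances as a reference.
codes_direct = torch.cdist(latents, codebook).argmin(dim=-1)
print(torch.equal(codes_expanded, codes_direct))  # True
```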
|
class JukeboxBottleneckBlock(nn.Module):
def __init__(self, config: JukeboxVQVAEConfig):
pass
def _tile(self, hidden_states):
pass
def init_codebook(self, hidden_states):
pass
def update_codebook(self, hidden_states, latent_states):
pass
def preprocess(self, hidden_states):
pass
def postprocess(self, latent_states, dequantised_states, x_shape):
pass
def quantise(self, latent_states):
pass
def dequantise(self, music_tokens):
pass
def encode(self, latent_states):
pass
def decode(self, music_tokens):
pass
def forward(self, hidden_states, update_codebook=True):
pass
| 12 | 0 | 13 | 2 | 10 | 2 | 1 | 0.21 | 1 | 3 | 1 | 0 | 11 | 8 | 11 | 21 | 152 | 28 | 107 | 56 | 95 | 22 | 95 | 56 | 83 | 3 | 1 | 1 | 16
|
1,769
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxConditionalAutoregressive
|
from ....utils.logging import tqdm
from torch import nn
import torch.nn.functional as F
import torch
import numpy as np
class JukeboxConditionalAutoregressive(nn.Module):
def __init__(self, config, n_ctx=None, embed_dim=None, audio_conditioning=False, metadata_conditioning=False, is_encoder=False):
"""
Autoregressive model on either lyric tokens or music tokens, or both. The attention pattern should be properly
set for each configuration.
Args:
config (`JukeboxPriorConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does
not load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
n_ctx (`int`, *optional*):
Number of tokens or lyrics tokens provided in a single pass.
embed_dim (`int`, *optional*):
Either equal to the dimension of the codebook, or to the sum of n_vocab (lyrics) and the codebook dimension
if the model combines lyrics and music tokens, or simply n_vocab if the model is a separate encoder.
audio_conditioning (`bool`, *optional*, defaults to `False`):
Whether or not the prior supports conditioning on audio.
metadata_conditioning (`bool`, *optional*, defaults to `False`):
Whether or not the prior supports conditioning on artist, genres, lyrics and timing.
is_encoder (`bool`, *optional*, defaults to `False`):
Whether the model is an encoder only model.
"""
super().__init__()
self.width = config.hidden_size
self.num_layers = config.num_layers
self.n_ctx = n_ctx if n_ctx is not None else config.n_ctx
self.embed_dim = embed_dim if embed_dim is not None else config.music_vocab_size
self.embed_tokens = nn.Embedding(self.embed_dim, config.hidden_size)
self.embed_tokens_dropout = nn.Dropout(config.emb_dropout)
self.metadata_conditioning = metadata_conditioning
self.audio_conditioning = audio_conditioning
if not metadata_conditioning:
self.start_token = nn.Parameter(torch.empty((1, config.hidden_size)))
self.pos_emb = JukeboxPositionalEmbedding(self.n_ctx, config.hidden_size)
self.pos_emb_dropout = nn.Dropout(config.emb_dropout)
self.transformer = JukeboxLayerStack(config, n_ctx=self.n_ctx)
self.is_encoder = is_encoder
self.encoder_len = config.nb_relevant_lyric_tokens
if config.merged_decoder:
self.add_cond_after_transformer = False
self.share_embed_tokens_fc_proj_out = False
else:
self.add_cond_after_transformer = True
self.share_embed_tokens_fc_proj_out = True
if not is_encoder:
self.fc_proj_out = nn.Linear(config.hidden_size, self.embed_dim, bias=False)
if self.share_embed_tokens_fc_proj_out:
self.fc_proj_out.weight = self.embed_tokens.weight
self.loss = torch.nn.CrossEntropyLoss()
def forward(self, tokens, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, get_preds=False, get_acts=False, get_sep_loss=False):
"""
Args:
tokens (`torch.tensor`):
Can represent music tokens, lyrics tokens or both, depending on the configuration.
"""
batch_size = tokens.shape[0]
with torch.no_grad():
tokens = tokens.view(batch_size, -1).long()
if not self.audio_conditioning:
audio_conditioning = torch.zeros((batch_size, 1, self.width), device=tokens.device, dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype)
target = tokens
hidden_states = self.embed_tokens(tokens)
hidden_states = torch.cat((hidden_states[:, -1:], hidden_states[:, :-1]), dim=1)
if self.metadata_conditioning:
hidden_states[:, 0] = metadata_conditioning.view(batch_size, self.width)
else:
hidden_states[:, 0] = self.start_token
hidden_states = self.embed_tokens_dropout(hidden_states) + self.pos_emb_dropout(self.pos_emb()) + audio_conditioning
hidden_states = self.transformer(hidden_states, last_encoder_hidden_states=last_encoder_hidden_states)
if self.add_cond_after_transformer:
hidden_states = hidden_states + audio_conditioning
activations = hidden_states
if self.is_encoder:
return hidden_states
hidden_states = self.fc_proj_out(hidden_states)
loss_fn = nn.CrossEntropyLoss()
if get_sep_loss:
lyric_hidden_states = hidden_states[:, :self.encoder_len].reshape(-1, self.embed_dim)
token_hidden_states = hidden_states[:, self.encoder_len:].reshape(-1, self.embed_dim)
lyric_loss = loss_fn(lyric_hidden_states, target[:, :self.encoder_len].reshape(-1)) / np.log(2.0)
music_token_loss = loss_fn(token_hidden_states, target[:, self.encoder_len:].reshape(-1)) / np.log(2.0)
loss = (lyric_loss, music_token_loss)
else:
loss = loss_fn(hidden_states.view(-1, self.embed_dim), target.view(-1)) / np.log(2.0)
if get_preds:
return (loss, hidden_states)
elif get_acts:
return (loss, activations)
else:
return (loss, None)
def get_emb(self, sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning):
if sample_t == 0:
hidden_states = torch.empty(n_samples, 1, self.width, dtype=self.embed_tokens.weight.dtype).to(self.embed_tokens.weight.device)
if self.metadata_conditioning:
hidden_states[:, 0] = metadata_conditioning.view(n_samples, self.width)
else:
hidden_states[:, 0] = self.start_token
else:
hidden_states = self.embed_tokens(tokens)
if audio_conditioning.shape == (n_samples, self.n_ctx, self.width):
cond = audio_conditioning[:, sample_t:sample_t + 1, :]
else:
cond = audio_conditioning
hidden_states = hidden_states + self.pos_emb()[sample_t:sample_t + 1] + cond
return (hidden_states, cond)
def sample(self, n_samples, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, temp=1.0, top_k=0, top_p=0.0, get_preds=False, sample_tokens=None):
if sample_tokens is None:
sample_tokens = self.n_ctx
if not self.audio_conditioning:
audio_conditioning = torch.zeros((n_samples, 1, self.width), dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype).to(self.fc_proj_out.device)
with torch.no_grad():
sampled_tokens = []
tokens = None
if get_preds:
preds = []
iter = tqdm(range(0, sample_tokens), leave=False)
for sample_t in iter:
iter.set_description(f'Ancestral sampling {sample_tokens} music tokens', refresh=True)
hidden_states, cond = self.get_emb(sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning)
hidden_states = self.transformer(hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=True)
if self.add_cond_after_transformer:
hidden_states = hidden_states + cond
hidden_states = self.fc_proj_out(hidden_states)
if get_preds:
preds.append(hidden_states.clone())
hidden_states = hidden_states / temp
hidden_states = filter_logits(hidden_states, top_k=top_k, top_p=top_p)
tokens = torch.distributions.Categorical(logits=hidden_states).sample()
sampled_tokens.append(tokens.clone())
del tokens
self.transformer.del_cache()
tokens = torch.cat(sampled_tokens, dim=1)
if get_preds:
preds = torch.cat(preds, dim=1)
if get_preds:
return (tokens, preds)
else:
return tokens
def split_chunks(self, length, chunk_size):
n_passes = (length + chunk_size - 1) // chunk_size
chunk_sizes = [*[chunk_size] * (n_passes - 1), (length - 1) % chunk_size + 1]
return chunk_sizes
def primed_sample(self, n_samples, lyric_and_music_tokens, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, temp=1.0, top_k=0, top_p=0.0, get_preds=False, chunk_size=None, sample_tokens=None):
if sample_tokens is None:
sample_tokens = self.n_ctx
batch_size = lyric_and_music_tokens.shape[0]
with torch.no_grad():
lyric_and_music_tokens = lyric_and_music_tokens.view(batch_size, -1).long()
sampled_audio = torch.split(lyric_and_music_tokens, 1, dim=1)
sampled_audio = list(sampled_audio)
if not self.audio_conditioning:
audio_conditioning = torch.zeros((n_samples, 1, self.width), dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype).to(lyric_and_music_tokens.device)
with torch.no_grad():
if get_preds:
preds = []
if chunk_size is None:
chunk_size = len(sampled_audio)
chunk_sizes = self.split_chunks(len(sampled_audio), chunk_size)
x_primes = []
start = 0
token = None
for current_chunk_size in tqdm(chunk_sizes, desc='Preparing past key value', leave=False):
sampled_audio_prime, conds_prime = ([], [])
for sample_t in range(start, start + current_chunk_size):
x_prime, cond_prime = self.get_emb(sample_t, n_samples, token, audio_conditioning, metadata_conditioning)
token = sampled_audio[sample_t]
sampled_audio_prime.append(x_prime)
conds_prime.append(cond_prime)
start = start + current_chunk_size
x_prime, cond_prime = (torch.cat(sampled_audio_prime, dim=1), torch.cat(conds_prime, dim=1))
del sampled_audio_prime
del conds_prime
if not get_preds:
del cond_prime
x_prime = self.transformer(x_prime, last_encoder_hidden_states=last_encoder_hidden_states, sample=True)
if get_preds:
if self.add_cond_after_transformer:
x_prime = x_prime + cond_prime
del cond_prime
x_primes.append(x_prime)
else:
del x_prime
if get_preds:
x_prime = torch.cat(x_primes, dim=1)
x_prime = self.fc_proj_out(x_prime)
preds.append(x_prime)
input_tokens = sampled_audio[-1]
itererator = tqdm(range(len(sampled_audio), sample_tokens), desc=f'Sampling {len(range(len(sampled_audio), sample_tokens))} music tokens', leave=False)
for sample_t in itererator:
hidden_states, cond = self.get_emb(sample_t, n_samples, input_tokens, audio_conditioning, metadata_conditioning)
hidden_states = self.transformer(hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=True)
if self.add_cond_after_transformer:
hidden_states = hidden_states + cond
hidden_states = self.fc_proj_out(hidden_states)
if get_preds:
preds.append(hidden_states)
hidden_states = hidden_states / temp
hidden_states = filter_logits(hidden_states, top_k=top_k, top_p=top_p)
music_tokens = torch.distributions.Categorical(logits=hidden_states).sample()
sampled_audio.append(music_tokens.clone())
input_tokens = music_tokens
del input_tokens, music_tokens
self.transformer.del_cache()
music_tokens = torch.cat(sampled_audio, dim=1)
if get_preds:
preds = torch.cat(preds, dim=1)
if get_preds:
return (music_tokens, preds)
else:
return music_tokens
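`primed_sample` pushes the priming tokens through the transformer in chunks produced by `split_chunks`. Running that helper on toy lengths shows how the final chunk absorbs the remainder while the chunks always cover the full length:

```python
# Standalone copy of JukeboxConditionalAutoregressive.split_chunks on toy values.
def split_chunks(length, chunk_size):
    n_passes = (length + chunk_size - 1) // chunk_size
    return [*[chunk_size] * (n_passes - 1), (length - 1) % chunk_size + 1]

print(split_chunks(10, 4))        # [4, 4, 2]
print(split_chunks(8, 4))         # [4, 4]
print(sum(split_chunks(10, 4)))   # 10
```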
|
class JukeboxConditionalAutoregressive(nn.Module):
def __init__(self, config, n_ctx=None, embed_dim=None, audio_conditioning=False, metadata_conditioning=False, is_encoder=False):
'''
Autoregressive model on either lyric tokens or music tokens, or both. The attention pattern should be properly
set for each configuration.
Args:
config (`JukeboxPriorConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does
not load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
n_ctx (`int`, *optional*):
Number of tokens or lyrics tokens provided in a single pass.
embed_dim (`int`, *optional*):
Either equal to the dimension of the codebook, or to the sum of n_vocab (lyrics) and the codebook dimension
if the model combines lyrics and music tokens, or simply n_vocab if the model is a separate encoder.
audio_conditioning (`bool`, *optional*, defaults to `False`):
Whether or not the prior supports conditioning on audio.
metadata_conditioning (`bool`, *optional*, defaults to `False`):
Whether or not the prior supports conditioning on artist, genres, lyrics and timing.
is_encoder (`bool`, *optional*, defaults to `False`):
Whether the model is an encoder only model.
'''
pass
def forward(self, tokens, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, get_preds=False, get_acts=False, get_sep_loss=False):
'''
Args:
tokens (`torch.tensor`):
Can represent music tokens, lyrics tokens or both, depending on the configuration.
'''
pass
def get_emb(self, sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning):
pass
def sample(self, n_samples, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, temp=1.0, top_k=0, top_p=0.0, get_preds=False, sample_tokens=None):
pass
def split_chunks(self, length, chunk_size):
pass
def primed_sample(self, n_samples, lyric_and_music_tokens, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, temp=1.0, top_k=0, top_p=0.0, get_preds=False, chunk_size=None, sample_tokens=None):
pass
| 7 | 2 | 54 | 5 | 42 | 8 | 8 | 0.19 | 1 | 6 | 2 | 0 | 6 | 18 | 6 | 16 | 328 | 37 | 254 | 100 | 206 | 47 | 174 | 59 | 167 | 16 | 1 | 4 | 45
|
1,770
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxConv1D
|
from torch import nn
import torch.nn.functional as F
import torch
class JukeboxConv1D(nn.Module):
def __init__(self, input_width, output_width):
super().__init__()
self.input_width = input_width
self.output_width = output_width
weight = torch.empty(input_width, output_width)
bias = torch.zeros(output_width)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
def forward(self, hidden_states):
size_out = (*hidden_states.size()[:-1], self.output_width)
hidden_states = torch.addmm(self.bias.type_as(hidden_states), hidden_states.view(-1, hidden_states.size(-1)), self.weight.type_as(hidden_states))
hidden_states = hidden_states.view(*size_out)
return hidden_states
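`JukeboxConv1D` is an affine map implemented with `torch.addmm` on a flattened input; its weight is stored as `(input_width, output_width)`, i.e. transposed relative to `nn.Linear`. A quick check with random data (purely illustrative) shows the forward pass equals `hidden_states @ weight + bias`:

```python
import torch

input_width, output_width = 4, 6
weight = torch.randn(input_width, output_width)
bias = torch.zeros(output_width)

hidden_states = torch.randn(2, 3, input_width)   # (batch, seq, input_width)

# Same computation as JukeboxConv1D.forward.
size_out = (*hidden_states.size()[:-1], output_width)
out_addmm = torch.addmm(bias, hidden_states.view(-1, input_width), weight).view(*size_out)

out_matmul = hidden_states @ weight + bias
print(torch.allclose(out_addmm, out_matmul, atol=1e-6))  # True
```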
|
class JukeboxConv1D(nn.Module):
def __init__(self, input_width, output_width):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 9 | 0 | 9 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 4 | 2 | 12 | 19 | 1 | 18 | 10 | 15 | 0 | 14 | 10 | 11 | 1 | 1 | 0 | 2
|
1,771
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxDecoder
|
from torch import nn
class JukeboxDecoder(nn.Module):
def __init__(self, config, hidden_dim, depth, levels, downs_t, strides_t):
super().__init__()
self.levels = levels
self.level_blocks = nn.ModuleList()
for level, down_t, stride_t in zip(list(range(self.levels)), downs_t, strides_t):
self.level_blocks.append(JukeboxDecoderConvBock(config, config.embed_dim, hidden_dim, depth, down_t, stride_t))
self.out = nn.Conv1d(config.embed_dim, config.conv_input_shape, 3, 1, 1)
def forward(self, hidden_states, all_levels=True):
hidden_state = hidden_states[-1]
for level in reversed(range(self.levels)):
level_block = self.level_blocks[level]
hidden_state = level_block(hidden_state)
if level != 0 and all_levels:
hidden_state = hidden_state + hidden_states[level - 1]
hidden_state = self.out(hidden_state)
return hidden_state
|
class JukeboxDecoder(nn.Module):
def __init__(self, config, hidden_dim, depth, levels, downs_t, strides_t):
pass
def forward(self, hidden_states, all_levels=True):
pass
| 3 | 0 | 12 | 2 | 9 | 1 | 3 | 0.05 | 1 | 6 | 1 | 0 | 2 | 3 | 2 | 12 | 25 | 5 | 19 | 10 | 16 | 1 | 17 | 10 | 14 | 3 | 1 | 2 | 5
|
1,772
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxDecoderConvBock
|
from torch import nn
class JukeboxDecoderConvBock(nn.Module):
def __init__(self, config, embed_dim, hidden_dim, depth, down_t, stride_t, reverse_dilation=True):
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
super().__init__()
blocks = []
if down_t > 0:
filter_t = stride_t * 2
pad_t = stride_t // 2
self.proj_in = nn.Conv1d(embed_dim, hidden_dim, 3, 1, 1)
for i in range(down_t):
blocks.append(JukeboxResnet1D(config, hidden_dim, depth, reverse_dilation))
blocks.append(nn.ConvTranspose1d(hidden_dim, hidden_dim if i < down_t - 1 else embed_dim, filter_t, stride_t, pad_t))
self.upsample_block = nn.ModuleList(blocks)
def forward(self, hidden_states):
hidden_states = self.proj_in(hidden_states)
for block in self.upsample_block:
hidden_states = block(hidden_states)
return hidden_states
|
class JukeboxDecoderConvBock(nn.Module):
def __init__(self, config, embed_dim, hidden_dim, depth, down_t, stride_t, reverse_dilation=True):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 11 | 0 | 11 | 0 | 3 | 0 | 1 | 3 | 1 | 0 | 2 | 4 | 2 | 12 | 24 | 1 | 23 | 12 | 20 | 0 | 19 | 12 | 16 | 4 | 1 | 2 | 6
|
1,773
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxEncoder
|
from torch import nn
class JukeboxEncoder(nn.Module):
def __init__(self, config, width, depth, levels, downs_t, strides_t):
super().__init__()
self.levels = levels
self.level_blocks = nn.ModuleList()
iterator = zip(list(range(self.levels)), downs_t, strides_t)
for i, down_t, stride_t in iterator:
self.level_blocks.append(JukeboxEncoderConvBlock(config, config.conv_input_shape if i == 0 else config.embed_dim, width, depth, down_t, stride_t))
def forward(self, hidden_states):
all_hidden_states = []
for level in range(self.levels):
level_block = self.level_blocks[level]
hidden_states = level_block(hidden_states)
all_hidden_states.append(hidden_states)
return all_hidden_states
|
class JukeboxEncoder(nn.Module):
def __init__(self, config, width, depth, levels, downs_t, strides_t):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 11 | 2 | 9 | 1 | 3 | 0.05 | 1 | 5 | 1 | 0 | 2 | 2 | 2 | 12 | 24 | 4 | 19 | 10 | 16 | 1 | 15 | 10 | 12 | 3 | 1 | 1 | 5
|
1,774
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxEncoderConvBlock
|
from torch import nn
class JukeboxEncoderConvBlock(nn.Module):
def __init__(self, config, embed_dim, hidden_dim, depth, down_t, stride_t):
super().__init__()
blocks = []
filter_t = stride_t * 2
pad_t = stride_t // 2
if down_t > 0:
for i in range(down_t):
blocks.append(nn.Conv1d(embed_dim if i == 0 else hidden_dim, hidden_dim, filter_t, stride_t, pad_t))
blocks.append(JukeboxResnet1D(config, hidden_dim, depth))
self.proj_out = nn.Conv1d(hidden_dim, config.embed_dim, 3, 1, 1)
self.downsample_block = nn.ModuleList(blocks)
def forward(self, hidden_states):
for block in self.downsample_block:
hidden_states = block(hidden_states)
hidden_states = self.proj_out(hidden_states)
return hidden_states
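Each pass of the loop above is a strided `Conv1d`, so the block shortens the time axis by a factor of `stride_t ** down_t` overall; with `filter_t = 2 * stride_t` and `pad_t = stride_t // 2`, lengths divisible by the stride divide exactly. A rough arithmetic sketch with made-up hop parameters:

```python
# Toy check of the overall downsampling factor of JukeboxEncoderConvBlock:
# down_t strided convolutions, each dividing the time axis by stride_t.
stride_t, down_t = 2, 5
seq_len = 1024
for _ in range(down_t):
    seq_len = seq_len // stride_t
print(seq_len)                      # 32
print(1024 // stride_t**down_t)     # 32
```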
|
class JukeboxEncoderConvBlock(nn.Module):
def __init__(self, config, embed_dim, hidden_dim, depth, down_t, stride_t):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 8 | 0 | 8 | 0 | 3 | 0 | 1 | 3 | 1 | 0 | 2 | 2 | 2 | 12 | 18 | 1 | 17 | 10 | 14 | 0 | 17 | 10 | 14 | 4 | 1 | 2 | 6
|
1,775
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxLabelConditioner
|
from torch import nn
class JukeboxLabelConditioner(nn.Module):
def __init__(self, config, include_time_signal):
super().__init__()
embed_dim = config.hidden_size
timing_dims = config.timing_dims
sampling_rate = config.sampling_rate
nb_genres, nb_artists = config.metadata_dims
music_tokens_shape = config.n_ctx
self.max_nb_genres = config.max_nb_genres
self.bow_genre_emb = nn.Embedding(nb_genres, embed_dim)
self.artist_emb = nn.Embedding(nb_artists, embed_dim)
self.include_time_signal = include_time_signal
if self.include_time_signal:
total_length_range = (config.min_duration * sampling_rate, config.max_duration * sampling_rate)
absolute_pos_range = (0.0, config.max_duration * sampling_rate)
relative_pos_range = (0.0, 1.0)
self.total_length_emb = JukeboxRangeEmbedding(1, timing_dims, total_length_range, embed_dim)
self.absolute_pos_emb = JukeboxRangeEmbedding(music_tokens_shape, timing_dims, absolute_pos_range, embed_dim)
self.relative_pos_emb = JukeboxRangeEmbedding(music_tokens_shape, timing_dims, relative_pos_range, embed_dim, clamp=True)
def forward(self, metadata):
total_length = metadata[:, 0:1]
offset = metadata[:, 1:2]
length = metadata[:, 2:3]
artist = metadata[:, 3:4]
genre = metadata[:, 4:]
artist_emb = self.artist_emb(artist)
mask = (genre >= 0).float().unsqueeze(2)
genre_emb = (self.bow_genre_emb(genre.clamp(0)) * mask).sum(dim=1, keepdim=True)
start_emb = genre_emb + artist_emb
if self.include_time_signal:
start, end = (offset, offset + length)
total_length = total_length.float()
start = start.float()
end = end.float()
pos_emb = self.total_length_emb(total_length) + self.absolute_pos_emb(start, end) + self.relative_pos_emb(start / total_length, end / total_length)
else:
pos_emb = None
return (start_emb, pos_emb)
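The slicing in `forward` implies a fixed per-sample metadata layout: total length, offset, sample length, artist id, then a variable number of genre ids. The sketch below spells that layout out with placeholder values; the ids are made up, and the assumption that unused genre slots hold a negative padding value follows from the `genre >= 0` mask:

```python
import torch

# Column layout read off JukeboxLabelConditioner.forward's slicing:
#   [:, 0:1] total_length  [:, 1:2] offset  [:, 2:3] length
#   [:, 3:4] artist id     [:, 4:]  genre ids (negative = padding)
metadata = torch.tensor([[441000, 0, 88200, 17, 3, 25, -1, -1]])

total_length = metadata[:, 0:1]
offset = metadata[:, 1:2]
length = metadata[:, 2:3]
artist = metadata[:, 3:4]
genre = metadata[:, 4:]

# Padded genre slots are masked out before the bag-of-words genre embedding is summed.
mask = (genre >= 0).float().unsqueeze(2)
print(mask.squeeze(2))   # tensor([[1., 1., 0., 0.]])
```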
|
class JukeboxLabelConditioner(nn.Module):
def __init__(self, config, include_time_signal):
pass
def forward(self, metadata):
pass
| 3 | 0 | 26 | 2 | 23 | 2 | 2 | 0.07 | 1 | 2 | 1 | 0 | 2 | 7 | 2 | 12 | 54 | 5 | 46 | 29 | 43 | 3 | 37 | 29 | 34 | 2 | 1 | 1 | 4
|
1,776
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxLayerNorm
|
import numpy as np
from torch.nn import LayerNorm as FusedLayerNorm
import torch.nn.functional as F
class JukeboxLayerNorm(FusedLayerNorm):
def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True):
super().__init__(normalized_shape, eps=eps, elementwise_affine=elementwise_affine)
self.width = np.prod(normalized_shape)
self.max_numel = 65535 * self.width
def forward(self, input):
if input.numel() > self.max_numel:
return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps).type_as(input)
else:
return super().forward(input).type_as(input)
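The override only changes behaviour for very large activations: once an input holds more than `65535 * width` elements, the parent implementation is bypassed in favour of the functional `F.layer_norm`. A toy computation of that threshold (shapes below are hypothetical):

```python
# Element-count threshold used by JukeboxLayerNorm, on made-up shapes.
width = 4800
max_numel = 65535 * width            # 314_568_000

batch, seq = 16, 8192
numel = batch * seq * width          # 629_145_600
print(numel > max_numel)             # True -> F.layer_norm fallback would be used
```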
|
class JukeboxLayerNorm(FusedLayerNorm):
def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True):
pass
def forward(self, input):
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 2 | 2 | 2 | 2 | 11 | 1 | 10 | 5 | 7 | 0 | 9 | 5 | 6 | 2 | 1 | 1 | 3
|
1,777
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxLayerStack
|
from torch import nn
from .configuration_jukebox import ATTENTION_PATTERNS, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig
class JukeboxLayerStack(nn.Module):
def __init__(self, config, n_ctx):
super().__init__()
self.n_ctx = n_ctx
self.width = config.hidden_size
self.num_layers = config.num_layers
self.blocks = config.blocks
self.attention_pattern = config.attention_pattern
if self.blocks is not None:
self.block_ctx = n_ctx // self.blocks
self.encoder_len = config.nb_relevant_lyric_tokens
self.n_heads = config.n_heads
attention_pattern = ATTENTION_PATTERNS[self.attention_pattern]
self._attn_mods = nn.ModuleList()
for depth in range(self.num_layers):
self._attn_mods.append(JukeboxBlock(config, n_ctx, attn_func=attention_pattern(depth)))
self.saved_attn_weights = []
def set_record_attn(self, record_attn):
"""
Makes forward prop dump self-attention softmaxes to self.saved_attn_weights.
Args:
record_attn (`Union[bool,set]`):
Either a set of layer indices indicating which layers to store, or a boolean value indicating whether
to dump all.
"""
def _should_record_attn(layer_idx):
if isinstance(record_attn, bool):
return record_attn
return layer_idx in record_attn
for i, layer in enumerate(self._attn_mods):
layer.attn.record_attn = _should_record_attn(i)
if not record_attn:
self.saved_attn_weights = []
def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False):
for i, attn_layer in enumerate(self._attn_mods):
if attn_layer.attn_func == 'cross_attention':
hidden_states = attn_layer(hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=sample)
else:
hidden_states = attn_layer(hidden_states, last_encoder_hidden_states=None, sample=sample)
if attn_layer.attn.record_attn:
self.saved_attn_weights.append(attn_layer.attn.c_attn.weight)
return hidden_states
def del_cache(self):
for attn_layer in self._attn_mods:
attn_layer.attn.del_cache()
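`set_record_attn` accepts either a bool (record every layer) or a collection of layer indices; the standalone predicate below mirrors the inner `_should_record_attn` helper on toy inputs:

```python
# Standalone version of the predicate used by JukeboxLayerStack.set_record_attn.
def should_record_attn(record_attn, layer_idx):
    if isinstance(record_attn, bool):
        return record_attn
    return layer_idx in record_attn

print([should_record_attn(True, i) for i in range(4)])     # [True, True, True, True]
print([should_record_attn({0, 2}, i) for i in range(4)])   # [True, False, True, False]
```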
|
class JukeboxLayerStack(nn.Module):
def __init__(self, config, n_ctx):
pass
def set_record_attn(self, record_attn):
'''
Makes forward prop dump self-attention softmaxes to self.saved_attn_weights.
Args:
record_attn (`Union[bool,set]`):
Either a set of layer indices indicating which layers to store, or a boolean value indicating whether
to dump all.
'''
pass
def _should_record_attn(layer_idx):
pass
def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False):
pass
def del_cache(self):
pass
| 6 | 1 | 12 | 1 | 9 | 2 | 3 | 0.25 | 1 | 5 | 1 | 0 | 4 | 10 | 4 | 14 | 58 | 9 | 40 | 21 | 34 | 10 | 37 | 21 | 31 | 4 | 1 | 2 | 14
|
1,778
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxMLP
|
from torch import nn
from ....activations import ACT2FN
class JukeboxMLP(nn.Module):
def __init__(self, config):
super().__init__()
embed_dim = config.hidden_size
hidden_dim = int(config.mlp_multiplier * embed_dim)
self.c_fc = JukeboxConv1D(embed_dim, hidden_dim)
self.c_proj = JukeboxConv1D(hidden_dim, embed_dim)
self.act = ACT2FN[config.act_fn]
self.dropout = nn.Dropout(config.resid_dropout)
def forward(self, hidden_states):
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class JukeboxMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 8 | 1 | 7 | 1 | 1 | 0.07 | 1 | 3 | 1 | 0 | 2 | 4 | 2 | 12 | 18 | 2 | 15 | 9 | 12 | 1 | 15 | 9 | 12 | 1 | 1 | 0 | 2
|
1,779
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxModel
|
from torch import nn
from ....utils.logging import tqdm
import os
import torch
from ....utils import add_start_docstrings, logging
import torch.nn.functional as F
@add_start_docstrings('The bare JUKEBOX Model used for music generation. 4 sampling techniques are supported : `primed_sample`, `upsample`,\n `continue_sample` and `ancestral_sample`. It does not have a `forward` method as the training is not end to end. If\n you want to fine-tune the model, it is recommended to use the `JukeboxPrior` class and train each prior\n individually.\n ', JUKEBOX_START_DOCSTRING)
class JukeboxModel(JukeboxPreTrainedModel):
_no_split_modules = ['JukeboxBlock']
def __init__(self, config):
super().__init__(config)
vqvae_config = config.vqvae_config
self.vqvae = JukeboxVQVAE(vqvae_config)
self.set_shared_params(config)
self.priors = nn.ModuleList([JukeboxPrior(config.prior_configs[level], level) for level in range(config.nb_priors)])
def set_shared_params(self, model_config):
"""
Initialises the parameters that are shared. This has to be done here because the list of `JukeboxPriorConfig`
is nested, and is thus unreachable in the `from_dict` function.
"""
for config in model_config.prior_configs:
config.sampling_rate = model_config.sampling_rate
config.timing_dims = model_config.timing_dims
config.min_duration = model_config.min_duration
config.max_duration = model_config.max_duration
config.max_nb_genres = model_config.max_nb_genres
config.metadata_conditioning = model_config.metadata_conditioning
def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1):
return self.vqvae.decode(music_tokens, start_level, end_level, bs_chunks)
def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1):
return self.vqvae.encode(input_audio, start_level, end_level, bs_chunks)
def split_batch(self, obj, n_samples, split_size):
n_passes = (n_samples + split_size - 1) // split_size
if isinstance(obj, torch.Tensor):
return torch.split(obj, split_size, dim=0)
elif isinstance(obj, list):
return list(zip(*[torch.split(item, split_size, dim=0) for item in obj]))
elif obj is None:
return [None] * n_passes
else:
raise TypeError('Unknown input type')
def sample_partial_window(self, music_tokens, labels, offset, sampling_kwargs, level, tokens_to_sample, max_batch_size):
prior = self.priors[level]
sampled_tokens = music_tokens[level]
n_ctx = prior.n_ctx
nb_sampled_tokens = sampled_tokens.shape[1]
if nb_sampled_tokens < n_ctx - tokens_to_sample:
sampling_kwargs['sample_tokens'] = nb_sampled_tokens + tokens_to_sample
start = 0
else:
sampling_kwargs['sample_tokens'] = n_ctx
start = nb_sampled_tokens - n_ctx + tokens_to_sample
return self.sample_single_window(music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size)
def sample_single_window(self, music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size):
prior = self.priors[level]
n_samples = music_tokens[0].shape[0]
n_ctx = prior.n_ctx
end = start + n_ctx
previous_sampled_tokens = music_tokens[level][:, start:end]
sample_tokens = sampling_kwargs.get('sample_tokens', None)
if 'sample_tokens' in sampling_kwargs:
sample_tokens = end - start
conditioning_tokens = previous_sampled_tokens.shape[1]
new_tokens = sample_tokens - previous_sampled_tokens.shape[1]
logger.info(f'Sampling {sample_tokens} tokens for [{start},{start + sample_tokens}]. Conditioning on {conditioning_tokens} tokens')
if new_tokens <= 0:
return music_tokens
music_tokens_conds = prior.get_music_tokens_conds(music_tokens, start, end)
metadata = prior.get_metadata(labels, start, self.total_length, offset)
music_tokens_list = self.split_batch(previous_sampled_tokens, n_samples, max_batch_size)
music_tokens_conds_list = self.split_batch(music_tokens_conds, n_samples, max_batch_size)
metadata_list = self.split_batch(metadata, n_samples, max_batch_size)
tokens = []
iterator = tqdm(zip(music_tokens_list, music_tokens_conds_list, metadata_list), leave=False)
for music_tokens_i, music_tokens_conds_i, metadata_i in iterator:
name = ['Ancestral', 'Primed'][music_tokens_i.shape[1] == 0]
iterator.set_description(f'[prior level {level}] {name} Sampling {sample_tokens} tokens out of {self.total_length // prior.raw_to_tokens}', refresh=True)
tokens_i = prior.sample(n_samples=music_tokens_i.shape[0], music_tokens=music_tokens_i, music_tokens_conds=music_tokens_conds_i, metadata=metadata_i, **sampling_kwargs)
tokens.append(tokens_i)
sampled_tokens = torch.cat(tokens, dim=0)
music_tokens_new = sampled_tokens[:, -new_tokens:]
music_tokens[level] = torch.cat([music_tokens[level], music_tokens_new], dim=1)
return music_tokens
def sample_level(self, music_tokens, labels, offset, sampling_kwargs, level, total_length, hop_length, max_batch_size):
if total_length >= self.priors[level].n_ctx:
iterator = get_starts(total_length, self.priors[level].n_ctx, hop_length)
for start in iterator:
music_tokens = self.sample_single_window(music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size)
else:
music_tokens = self.sample_partial_window(music_tokens, labels, offset, sampling_kwargs, level, total_length, max_batch_size)
return music_tokens
@torch.no_grad()
def _sample(self, music_tokens, labels, sample_levels, metas=None, chunk_size=32, sampling_temperature=0.98, lower_batch_size=16, max_batch_size=16, sample_length_in_seconds=24, compute_alignments=False, sample_tokens=None, offset=0, save_results=True, sample_length=None) -> list[torch.LongTensor]:
"""
Core sampling function used to generate music tokens. Iterates over the provided list of levels, while saving
the generated raw audio at each step.
Args:
music_tokens (`list[torch.LongTensor]`):
A sequence of music tokens of length `self.levels` which will be used as context to continue the
sampling process. Should have `self.levels` tensors, each corresponding to the generation at a certain
level.
labels (`list[torch.LongTensor]`):
List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre +
lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens
which are used to condition the generation.
sample_levels (`list[int]`):
List of the desired levels at which the sampling will be done. A level is equivalent to the index of
the prior in the list of priors
metas (`list[Any]`, *optional*):
Metadata used to generate the `labels`
chunk_size (`int`, *optional*, defaults to 32):
Size of a chunk of audio, used to fill up the memory in chunks to prevent OOM errors. Bigger chunks
mean faster memory filling but more memory consumption.
sampling_temperature (`float`, *optional*, defaults to 0.98):
Temperature used to adjust the randomness of the sampling.
lower_batch_size (`int`, *optional*, defaults to 16):
Maximum batch size for the lower level priors
max_batch_size (`int`, *optional*, defaults to 16):
Maximum batch size for the top level priors
sample_length_in_seconds (`int`, *optional*, defaults to 24):
Desired length of the generation in seconds
compute_alignments (`bool`, *optional*, defaults to `False`):
Whether or not to compute the alignment between the lyrics and the audio using the top_prior
sample_tokens (`int`, *optional*):
Precise number of tokens that should be sampled at each level. This is mostly useful for running dummy
experiments
offset (`int`, *optional*, defaults to 0):
Audio offset used as conditioning, corresponds to the starting sample in the music. If the offset is
greater than 0, the lyrics will be shifted to take that into account
save_results (`bool`, *optional*, defaults to `True`):
Whether or not to save the intermediate results. If `True`, will generate a folder named with the start
time.
sample_length (`int`, *optional*):
Desired length of the generation in samples.
Returns: torch.Tensor
Example:
```python
>>> from transformers import AutoTokenizer, JukeboxModel, set_seed
>>> import torch
>>> metas = dict(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller from an antique land")
>>> tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
>>> model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval()
>>> labels = tokenizer(**metas)["input_ids"]
>>> set_seed(0)
>>> zs = [torch.zeros(1, 0, dtype=torch.long) for _ in range(3)]
>>> zs = model._sample(zs, labels, [0], sample_length=40 * model.priors[0].raw_to_tokens, save_results=False)
>>> zs[0]
tensor([[1853, 1369, 1150, 1869, 1379, 1789, 519, 710, 1306, 1100, 1229, 519,
353, 1306, 1379, 1053, 519, 653, 1631, 1467, 1229, 1229, 10, 1647,
1254, 1229, 1306, 1528, 1789, 216, 1631, 1434, 653, 475, 1150, 1528,
1804, 541, 1804, 1434]])
```
"""
top_prior = self.priors[0]
if sample_length is not None:
total_length = sample_length
else:
total_length = int(sample_length_in_seconds * self.config.sampling_rate) // top_prior.raw_to_tokens * top_prior.raw_to_tokens
if sample_levels is None:
sample_levels = range(len(self.priors))
self.total_length = total_length
for level in sample_levels:
sampling_kwargs = {'temp': 0.99 if level == len(self.priors) - 1 else sampling_temperature, 'chunk_size': chunk_size, 'sample_tokens': sample_tokens}
total_token_to_sample = total_length // self.priors[level].raw_to_tokens
hop_length = int(self.config.hop_fraction[level] * self.priors[level].n_ctx)
max_batch_size = lower_batch_size if level != sample_levels else max_batch_size
music_tokens = self.sample_level(music_tokens, labels[level], offset, sampling_kwargs, level, total_token_to_sample, hop_length, max_batch_size)
if save_results:
self.vqvae.to(music_tokens[level].device)
with torch.no_grad():
start_level = len(self.priors) - level - 1
raw_audio = self.vqvae.decode(music_tokens[:level + 1], start_level=start_level, bs_chunks=music_tokens[level].shape[0])
logdir = f'jukebox/level_{level}'
if not os.path.exists(logdir):
os.makedirs(logdir)
save_temp_audio(logdir, level, metas=metas, aud=raw_audio.float())
if compute_alignments and self.priors[0] is not None and (self.priors[0].nb_relevant_lyric_tokens > 0):
with torch.no_grad():
alignments = get_alignment(music_tokens, labels[0], self.priors[0], self.config)
torch.save({'alignments': alignments}, f'{logdir}/lyric_alignments.pt')
return music_tokens
@add_start_docstrings('\n Generates music tokens based on the provided `labels. Will start at the desired prior level and automatically\n upsample the sequence. If you want to create the audio, you should call `model.decode(tokens)`, which will use\n the VQ-VAE decoder to convert the music tokens to raw audio.\n\n Args:\n labels (`list[torch.LongTensor]`) :\n List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre +\n lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens\n which are used to condition the generation.\n n_samples (`int`, *optional*, default to 1) :\n Number of samples to be generated in parallel.\n ')
def ancestral_sample(self, labels, n_samples=1, **sampling_kwargs) -> list[torch.LongTensor]:
"""
Example:
```python
>>> from transformers import AutoTokenizer, JukeboxModel, set_seed
>>> model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval()
>>> tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
>>> lyrics = "Hey, are you awake? Can you talk to me?"
>>> artist = "Zac Brown Band"
>>> genre = "Country"
>>> metas = tokenizer(artist=artist, genres=genre, lyrics=lyrics)
>>> set_seed(0)
>>> music_tokens = model.ancestral_sample(metas.input_ids, sample_length=400)
>>> with torch.no_grad():
... model.decode(music_tokens)[:, :10].squeeze(-1)
tensor([[-0.0219, -0.0679, -0.1050, -0.1203, -0.1271, -0.0936, -0.0396, -0.0405,
-0.0818, -0.0697]])
```
"""
sample_levels = sampling_kwargs.pop('sample_levels', list(range(len(self.priors))))
music_tokens = [torch.zeros(n_samples, 0, dtype=torch.long, device=labels[0].device) for _ in range(len(self.priors))]
music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs)
return music_tokens
@add_start_docstrings('Generates a continuation of the previously generated tokens.\n\n Args:\n music_tokens (`list[torch.LongTensor]` of length `self.levels` ) :\n A sequence of music tokens which will be used as context to continue the sampling process. Should have\n `self.levels` tensors, each corresponding to the generation at a certain level.\n ', JUKEBOX_SAMPLING_INPUT_DOCSTRING)
def continue_sample(self, music_tokens, labels, **sampling_kwargs) -> list[torch.LongTensor]:
sample_levels = sampling_kwargs.pop('sample_levels', list(range(len(self.priors))))
music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs)
return music_tokens
@add_start_docstrings('Upsamples a sequence of music tokens using the prior at level `level`.\n\n Args:\n music_tokens (`list[torch.LongTensor]` of length `self.levels` ) :\n A sequence of music tokens which will be used as context to continue the sampling process. Should have\n `self.levels` tensors, each corresponding to the generation at a certain level.\n ', JUKEBOX_SAMPLING_INPUT_DOCSTRING)
def upsample(self, music_tokens, labels, **sampling_kwargs) -> list[torch.LongTensor]:
sample_levels = sampling_kwargs.pop('sample_levels', list(range(len(self.priors) - 1)))
music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs)
return music_tokens
@add_start_docstrings('Generate a raw audio conditioned on the provided `raw_audio` which is used as conditioning at each of the\n generation levels. The audio is encoded to music tokens using the 3 levels of the VQ-VAE. These tokens are\n used: as conditioning for each level, which means that no ancestral sampling is required.\n\n Args:\n raw_audio (`list[torch.Tensor]` of length `n_samples` ) :\n A list of raw audio that will be used as conditioning information for each samples that will be\n generated.\n ', JUKEBOX_SAMPLING_INPUT_DOCSTRING)
def primed_sample(self, raw_audio, labels, **sampling_kwargs) -> list[torch.LongTensor]:
sample_levels = sampling_kwargs.pop('sample_levels', list(range(len(self.priors))))
self.vqvae.to(raw_audio.device).float()
with torch.no_grad():
music_tokens = self.vqvae.encode(raw_audio, start_level=0, end_level=len(self.priors), bs_chunks=raw_audio.shape[0])
music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs)
return music_tokens
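`sample_single_window` relies on `split_batch` to keep each prior call under `max_batch_size`. Running the same helper on toy inputs (shapes are arbitrary) shows how a tensor is chunked along the batch dimension and how `None` conditioning expands to one `None` per pass:

```python
import torch

# Standalone copy of JukeboxModel.split_batch, exercised on toy inputs.
def split_batch(obj, n_samples, split_size):
    n_passes = (n_samples + split_size - 1) // split_size
    if isinstance(obj, torch.Tensor):
        return torch.split(obj, split_size, dim=0)
    elif isinstance(obj, list):
        return list(zip(*[torch.split(item, split_size, dim=0) for item in obj]))
    elif obj is None:
        return [None] * n_passes
    else:
        raise TypeError('Unknown input type')

tokens = torch.arange(10).reshape(5, 2)                    # 5 hypothetical samples
print([t.shape[0] for t in split_batch(tokens, 5, 2)])     # [2, 2, 1]
print(split_batch(None, 5, 2))                             # [None, None, None]
```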
|
@add_start_docstrings('The bare JUKEBOX Model used for music generation. 4 sampling techniques are supported : `primed_sample`, `upsample`,\n `continue_sample` and `ancestral_sample`. It does not have a `forward` method as the training is not end to end. If\n you want to fine-tune the model, it is recommended to use the `JukeboxPrior` class and train each prior\n individually.\n ', JUKEBOX_START_DOCSTRING)
class JukeboxModel(JukeboxPreTrainedModel):
def __init__(self, config):
pass
def set_shared_params(self, model_config):
'''
Initialises the parameters that are shared. This has to be done here because the list of `JukeboxPriorConfig`
is nested, and is thus unreachable in the `from_dict` function.
'''
pass
def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1):
pass
def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1):
pass
def split_batch(self, obj, n_samples, split_size):
pass
def sample_partial_window(self, music_tokens, labels, offset, sampling_kwargs, level, tokens_to_sample, max_batch_size):
pass
def sample_single_window(self, music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size):
pass
def sample_level(self, music_tokens, labels, offset, sampling_kwargs, level, total_length, hop_length, max_batch_size):
pass
@torch.no_grad()
def _sample(self, music_tokens, labels, sample_levels, metas=None, chunk_size=32, sampling_temperature=0.98, lower_batch_size=16, max_batch_size=16, sample_length_in_seconds=24, compute_alignments=False, sample_tokens=None, offset=0, save_results=True, sample_length=None) -> list[torch.LongTensor]:
'''
Core sampling function used to generate music tokens. Iterates over the provided list of levels, while saving
the generated raw audio at each step.
Args:
music_tokens (`list[torch.LongTensor]`):
A sequence of music tokens of length `self.levels` which will be used as context to continue the
sampling process. Should have `self.levels` tensors, each corresponding to the generation at a certain
level.
labels (`list[torch.LongTensor]`):
List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre +
lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens
which are used to condition the generation.
sample_levels (`list[int]`):
List of the desired levels at which the sampling will be done. A level is equivalent to the index of
the prior in the list of priors
metas (`list[Any]`, *optional*):
Metadata used to generate the `labels`
chunk_size (`int`, *optional*, defaults to 32):
Size of a chunk of audio, used to fill up the memory in chunks to prevent OOM errors. Bigger chunks
mean faster memory filling but more memory consumption.
sampling_temperature (`float`, *optional*, defaults to 0.98):
Temperature used to adjust the randomness of the sampling.
lower_batch_size (`int`, *optional*, defaults to 16):
Maximum batch size for the lower level priors
max_batch_size (`int`, *optional*, defaults to 16):
Maximum batch size for the top level priors
sample_length_in_seconds (`int`, *optional*, defaults to 24):
Desired length of the generation in seconds
compute_alignments (`bool`, *optional*, defaults to `False`):
Whether or not to compute the alignment between the lyrics and the audio using the top_prior
sample_tokens (`int`, *optional*):
Precise number of tokens that should be sampled at each level. This is mostly useful for running dummy
experiments
offset (`int`, *optional*, defaults to 0):
Audio offset used as conditioning, corresponds to the starting sample in the music. If the offset is
greater than 0, the lyrics will be shifted to take that into account
save_results (`bool`, *optional*, defaults to `True`):
Whether or not to save the intermediate results. If `True`, will generate a folder named with the start
time.
sample_length (`int`, *optional*):
Desired length of the generation in samples.
Returns: torch.Tensor
Example:
```python
>>> from transformers import AutoTokenizer, JukeboxModel, set_seed
>>> import torch
>>> metas = dict(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller from an antique land")
>>> tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
>>> model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval()
>>> labels = tokenizer(**metas)["input_ids"]
>>> set_seed(0)
>>> zs = [torch.zeros(1, 0, dtype=torch.long) for _ in range(3)]
>>> zs = model._sample(zs, labels, [0], sample_length=40 * model.priors[0].raw_to_tokens, save_results=False)
>>> zs[0]
tensor([[1853, 1369, 1150, 1869, 1379, 1789, 519, 710, 1306, 1100, 1229, 519,
353, 1306, 1379, 1053, 519, 653, 1631, 1467, 1229, 1229, 10, 1647,
1254, 1229, 1306, 1528, 1789, 216, 1631, 1434, 653, 475, 1150, 1528,
1804, 541, 1804, 1434]])
```
'''
pass
@add_start_docstrings('\n Generates music tokens based on the provided `labels. Will start at the desired prior level and automatically\n upsample the sequence. If you want to create the audio, you should call `model.decode(tokens)`, which will use\n the VQ-VAE decoder to convert the music tokens to raw audio.\n\n Args:\n labels (`list[torch.LongTensor]`) :\n List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre +\n lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens\n which are used to condition the generation.\n n_samples (`int`, *optional*, default to 1) :\n Number of samples to be generated in parallel.\n ')
def ancestral_sample(self, labels, n_samples=1, **sampling_kwargs) -> list[torch.LongTensor]:
'''
Example:
```python
>>> from transformers import AutoTokenizer, JukeboxModel, set_seed
>>> model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval()
>>> tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
>>> lyrics = "Hey, are you awake? Can you talk to me?"
>>> artist = "Zac Brown Band"
>>> genre = "Country"
>>> metas = tokenizer(artist=artist, genres=genre, lyrics=lyrics)
>>> set_seed(0)
>>> music_tokens = model.ancestral_sample(metas.input_ids, sample_length=400)
>>> with torch.no_grad():
... model.decode(music_tokens)[:, :10].squeeze(-1)
tensor([[-0.0219, -0.0679, -0.1050, -0.1203, -0.1271, -0.0936, -0.0396, -0.0405,
-0.0818, -0.0697]])
```
'''
pass
@add_start_docstrings('Generates a continuation of the previously generated tokens.\n\n Args:\n music_tokens (`list[torch.LongTensor]` of length `self.levels` ) :\n A sequence of music tokens which will be used as context to continue the sampling process. Should have\n `self.levels` tensors, each corresponding to the generation at a certain level.\n ', JUKEBOX_SAMPLING_INPUT_DOCSTRING)
def continue_sample(self, music_tokens, labels, **sampling_kwargs) -> list[torch.LongTensor]:
pass
@add_start_docstrings('Upsamples a sequence of music tokens using the prior at level `level`.\n\n Args:\n music_tokens (`list[torch.LongTensor]` of length `self.levels` ) :\n A sequence of music tokens which will be used as context to continue the sampling process. Should have\n `self.levels` tensors, each corresponding to the generation at a certain level.\n ', JUKEBOX_SAMPLING_INPUT_DOCSTRING)
def upsample(self, music_tokens, labels, **sampling_kwargs) -> list[torch.LongTensor]:
pass
@add_start_docstrings('Generate a raw audio conditioned on the provided `raw_audio` which is used as conditioning at each of the\n generation levels. The audio is encoded to music tokens using the 3 levels of the VQ-VAE. These tokens are\n used: as conditioning for each level, which means that no ancestral sampling is required.\n\n Args:\n raw_audio (`list[torch.Tensor]` of length `n_samples` ) :\n A list of raw audio that will be used as conditioning information for each samples that will be\n generated.\n ', JUKEBOX_SAMPLING_INPUT_DOCSTRING)
def primed_sample(self, raw_audio, labels, **sampling_kwargs) -> list[torch.LongTensor]:
pass
| 20
| 3
| 23
| 2
| 14
| 7
| 2
| 0.41
| 1
| 9
| 2
| 0
| 13
| 3
| 13
| 144
| 371
| 44
| 233
| 129
| 155
| 95
| 123
| 64
| 109
| 9
| 3
| 4
| 31
|
1,780
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxMusicTokenConditioner
|
from torch import nn
class JukeboxMusicTokenConditioner(nn.Module):
"""
The `JukeboxMusicTokenConditioner` takes music tokens as an input (corresponding to the codes of the VQVAE's
codebook) and upsamples them using a single decoder convolution block (the same as the one used in the VQVAE).
"""
def __init__(self, config, level):
super().__init__()
self.embed_tokens = nn.Embedding(config.music_vocab_size, config.hidden_size)
config.embed_dim = config.music_vocab_size
self.upsampler = JukeboxDecoderConvBock(config, config.hidden_size, config.res_conv_width, config.res_conv_depth, config.res_downs_t[level], config.res_strides_t[level], reverse_dilation=False)
self.layer_norm = JukeboxLayerNorm(config.hidden_size)
def forward(self, music_tokens, raw_audio_conditioning=None):
"""
Args:
music_tokens (`torch.LongTensor`):
Music tokens from the upper level in range(nb_discrete_codes)
raw_audio_conditioning (`torch.LongTensor`, *optional*):
Audio used when primed sampling, raw audio information that conditions the generation
"""
if raw_audio_conditioning is None:
raw_audio_conditioning = 0.0
music_tokens = music_tokens.long()
hidden_states = self.embed_tokens(music_tokens)
hidden_states = hidden_states + raw_audio_conditioning
hidden_states = hidden_states.permute(0, 2, 1)
hidden_states = self.upsampler(hidden_states)
hidden_states = hidden_states.permute(0, 2, 1)
hidden_states = self.layer_norm(hidden_states)
return hidden_states
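# A minimal, self-contained sketch of the embed -> permute -> upsample flow described above.
# A plain ConvTranspose1d stands in for JukeboxDecoderConvBock here; the vocabulary size,
# width and stride are arbitrary illustration values, not the real config values.
import torch
from torch import nn

music_vocab_size, hidden_size, stride = 2048, 64, 4
embed = nn.Embedding(music_vocab_size, hidden_size)
upsampler = nn.ConvTranspose1d(hidden_size, hidden_size, kernel_size=stride, stride=stride)

upper_tokens = torch.randint(0, music_vocab_size, (1, 32))    # (batch, seq) upper-level codes
hidden = embed(upper_tokens)                                  # (batch, seq, hidden_size)
hidden = upsampler(hidden.permute(0, 2, 1)).permute(0, 2, 1)  # (batch, seq * stride, hidden_size)
print(hidden.shape)                                           # torch.Size([1, 128, 64])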
|
class JukeboxMusicTokenConditioner(nn.Module):
'''
The `JukeboxMusicTokenConditioner` takes music tokens as an input (corresponding to the codes of the VQVAE's
codebook) and upsamples them using a single decoder convolution block (the same as the one used in the VQVAE).
'''
def __init__(self, config, level):
pass
def forward(self, music_tokens, raw_audio_conditioning=None):
'''
Args:
music_tokens (`torch.LongTensor`):
Music tokens from the upper level in range(nb_discrete_codes)
raw_audio_conditioning (`torch.LongTensor`, *optional*):
Audio used when primed sampling, raw audio information that conditions the generation
'''
pass
| 3
| 2
| 18
| 1
| 13
| 5
| 2
| 0.54
| 1
| 3
| 2
| 0
| 2
| 3
| 2
| 12
| 43
| 4
| 26
| 7
| 23
| 14
| 18
| 7
| 15
| 2
| 1
| 1
| 3
|
1,781
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxPositionalEmbedding
|
from torch import nn
import torch.nn.functional as F
import torch
class JukeboxPositionalEmbedding(nn.Module):
def __init__(self, embed_dim, width):
super().__init__()
self.pos_emb = nn.Parameter(torch.empty((embed_dim, width)))
def forward(self):
pos_emb = self.pos_emb
return pos_emb
|
class JukeboxPositionalEmbedding(nn.Module):
def __init__(self, embed_dim, width):
pass
def forward(self):
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 5
| 4
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
1,782
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxPreTrainedModel
|
from .configuration_jukebox import ATTENTION_PATTERNS, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig
from ....modeling_utils import PreTrainedModel
class JukeboxPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: JukeboxConfig
base_model_prefix = 'jukebox'
supports_gradient_checkpointing = False
def _init_weights(self, module):
if isinstance(module, (JukeboxPrior, JukeboxVQVAE)):
module.apply(module._init_weights)
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
|
class JukeboxPreTrainedModel(PreTrainedModel):
'''
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
'''
def _init_weights(self, module):
pass
def __init__(self, *inputs, **kwargs):
pass
| 3
| 1
| 3
| 0
| 3
| 0
| 2
| 0.44
| 1
| 3
| 2
| 1
| 2
| 0
| 2
| 131
| 16
| 3
| 9
| 6
| 6
| 4
| 9
| 6
| 6
| 2
| 2
| 1
| 3
|
1,783
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxPrior
|
from torch import nn
from .configuration_jukebox import ATTENTION_PATTERNS, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig
from typing import Optional
import numpy as np
from ....modeling_utils import PreTrainedModel
import torch
import torch.nn.functional as F
class JukeboxPrior(PreTrainedModel):
"""
The JukeboxPrior class, which is a wrapper around the various conditioning modules and the transformer. JukeboxPrior
can be seen as a language model trained on music. It models the next `music token` prediction task. If a (lyric)
`encoder` is defined, it also models the `next character` prediction on the lyrics. It can be conditioned on timing,
artist, genre, lyrics and codes from lower-level priors.
Args:
config (`JukeboxPriorConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
level (`int`, *optional*):
Current level of the Prior. Should be in range `[0,nb_priors]`.
nb_priors (`int`, *optional*, defaults to 3):
Total number of priors.
vqvae_encoder (`Callable`, *optional*):
Encoding method of the VQVAE encoder used in the forward pass of the model. Passing functions instead of
the vqvae module to avoid getting the parameters.
vqvae_decoder (`Callable`, *optional*):
Decoding method of the VQVAE decoder used in the forward pass of the model. Passing functions instead of
the vqvae module to avoid getting the parameters.
"""
config: JukeboxPriorConfig
def _init_weights(self, module):
init_scale = self.config.init_scale
if isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=0.02 * init_scale)
elif isinstance(module, JukeboxConv1D):
if self.config.zero_out:
module.weight.data.zero_()
else:
module.weight.data.normal_(mean=0.0, std=0.02 * init_scale)
elif isinstance(module, JukeboxPositionalEmbedding):
module.pos_emb.data.normal_(mean=0.0, std=0.01 * init_scale)
elif isinstance(module, JukeboxRangeEmbedding):
module.emb.weight.data.normal_(mean=0.0, std=0.01 * init_scale)
elif isinstance(module, JukeboxConditionalAutoregressive) and hasattr(module, 'lm_head'):
module.lm_head.weight.data.normal_(mean=0.0, std=0.02 * init_scale)
elif isinstance(module, JukeboxConditionalAutoregressive) and hasattr(module, 'start_token'):
module.start_token.data.normal_(mean=0.0, std=0.01 * init_scale)
elif isinstance(module, JukeboxResConv1DBlock) and self.config.zero_out:
module.conv1d_2.weight.data.zero_()
module.conv1d_2.bias.data.zero_()
if isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def __init__(self, config: JukeboxPriorConfig, level=None, nb_priors=3, vqvae_encoder=None, vqvae_decoder=None):
super().__init__(config)
self.vqvae_encoder = vqvae_encoder
self.vqvae_decoder = vqvae_decoder
self.levels = nb_priors
self.level = level if level is not None else config.level
self.base_model_prefix = f'priors.{self.level}'
self.n_ctx = config.n_ctx
self.lyric_conditioning = config.nb_relevant_lyric_tokens > 0
self.nb_relevant_lyric_tokens = config.nb_relevant_lyric_tokens
self.encoder_loss_fraction = config.encoder_loss_fraction
self.audio_conditioning = self.level != 0
self.cond_level = self.level - 1
if self.audio_conditioning:
self.conditioner_blocks = JukeboxMusicTokenConditioner(config, self.level)
self.metadata_conditioning = config.metadata_conditioning
if self.metadata_conditioning:
self.metadata_embedding = JukeboxLabelConditioner(config, include_time_signal=not self.audio_conditioning)
self.is_encoder_decoder = config.is_encoder_decoder
if config.is_encoder_decoder:
self.input_shapes = [config.nb_relevant_lyric_tokens, config.n_ctx]
self.embed_dim_shift = [0, config.lyric_vocab_size]
self.width = config.hidden_size
self.nb_relevant_lyric_tokens = config.nb_relevant_lyric_tokens
self.prior = JukeboxConditionalAutoregressive(config, n_ctx=config.nb_relevant_lyric_tokens + config.n_ctx, embed_dim=config.lyric_vocab_size + config.music_vocab_size, audio_conditioning=self.audio_conditioning or self.metadata_conditioning, metadata_conditioning=True)
else:
encoder_config = config.encoder_config
if self.nb_relevant_lyric_tokens != 0 and self.lyric_conditioning:
self.lyric_acts_width = encoder_config.hidden_size
self.encoder_width = config.hidden_size
self.encoder_dim = config.lyric_vocab_size
self.encoder = JukeboxConditionalAutoregressive(encoder_config, n_ctx=self.nb_relevant_lyric_tokens, embed_dim=self.encoder_dim, audio_conditioning=False, metadata_conditioning=False, is_encoder=True)
self.encoder.proj_in = JukeboxConv1D(encoder_config.hidden_size, config.hidden_size)
self.encoder.final_layer_norm = JukeboxLayerNorm(config.hidden_size)
self.encoder.lm_head = nn.Linear(config.hidden_size, config.lyric_vocab_size, bias=False)
else:
self.nb_relevant_lyric_tokens = 0
self.prior = JukeboxConditionalAutoregressive(config, audio_conditioning=self.audio_conditioning or self.metadata_conditioning, metadata_conditioning=self.metadata_conditioning)
self.next_token_prediction_loss_dims = config.n_ctx
self.total_loss_dims = self.nb_relevant_lyric_tokens + self.next_token_prediction_loss_dims
self.downsamples = [stride ** down for stride, down in zip(config.res_strides_t, config.res_downs_t)]
self.cond_downsample = self.downsamples[self.level] if self.level != 0 else None
self.raw_to_tokens = np.prod(self.downsamples[:nb_priors - self.level])
self.sample_length = self.n_ctx * self.raw_to_tokens
logger.info(f'Level:{self.level}, Cond downsample:{self.cond_downsample}, Raw to tokens:{self.raw_to_tokens}, Sample length:{self.sample_length}')
def get_metadata(self, labels, start, total_length, offset, get_indices=False):
metadata = labels.clone()
metadata[:, 0] = total_length
metadata[:, 2] = int(self.sample_length)
metadata[:, 1:2] = int(offset * self.raw_to_tokens) + int(start * self.raw_to_tokens)
metadata, indices = self.set_metadata_lyric_tokens(metadata)
if get_indices:
return (metadata, indices)
else:
return metadata
def set_metadata_lyric_tokens(self, labels):
"""
Processes the full labels to only retrieve the relevant lyric tokens and keep the metadata conditioning tokens.
"""
if self.nb_relevant_lyric_tokens > 0:
tokens_list = torch.zeros((labels.shape[0], self.nb_relevant_lyric_tokens), dtype=torch.long, device=labels.device)
indices_list = []
for idx in range(labels.shape[0]):
full_tokens = labels.clone()[:, 4 + self.metadata_embedding.max_nb_genres:]
total_length, offset, duration = (labels[idx, 0], labels[idx, 1], labels[idx, 2])
tokens, indices = get_relevant_lyric_tokens(full_tokens, self.nb_relevant_lyric_tokens, total_length, offset, duration)
tokens_list[idx, :] = tokens
indices_list.append(indices)
return (torch.cat((labels[:, :4 + self.metadata_embedding.max_nb_genres], tokens_list), dim=-1), indices_list)
else:
return (labels, None)
def get_music_tokens_conds(self, music_tokens, start, end):
"""
Extracts current level's conditioning music tokens.
"""
if self.level != 0:
music_tokens_cond = music_tokens[self.level - 1]
music_tokens = music_tokens_cond[:, start // self.cond_downsample:end // self.cond_downsample]
missing_cond_len = self.n_ctx // self.cond_downsample - music_tokens_cond[-1].shape[-1]
if missing_cond_len > 0:
init_cond = torch.zeros(1, missing_cond_len).to(music_tokens_cond.device)
music_tokens_cond = torch.cat((music_tokens_cond, init_cond), dim=-1).long()
music_tokens_conds = [music_tokens_cond]
else:
music_tokens_conds = None
return music_tokens_conds
def prior_preprocess(self, tokens, conds):
"""
Shifts the input tokens to account for the dictionary merge. The `embed_dim_shift` gives the amount by which the
music tokens should be shifted. It is equal to `lyric_vocab_size`.
"""
batch_size = tokens[0].shape[0]
for i in range(len(tokens)):
tokens[i] = (tokens[i] + int(self.embed_dim_shift[i])).view(batch_size, -1)
for i in range(len(conds)):
if conds[i] is None:
conds[i] = torch.zeros((batch_size, self.input_shapes[i], self.width), dtype=tokens[0].dtype, device=tokens[0].device)
return (torch.cat(tokens, dim=1), torch.cat(conds, dim=1))
def prior_postprocess(self, tokens):
"""
Shifts back the input tokens if the model uses an encoder decoder architecture. As the embedding layer is
shared, `prior_embed_dim_shift` shifts the music token ids by `lyric_vocab_size`. Only returns the music
tokens.
"""
batch_size = tokens.shape[0]
dims = (self.input_shapes[0], tokens.shape[1] - self.input_shapes[0])
tokens = list(torch.split(tokens, dims, dim=1))
for i in range(len(tokens)):
bins_shift = int(self.embed_dim_shift[i])
tokens[i] = (tokens[i] - bins_shift).view(batch_size, -1)
tokens[i] = torch.clamp(tokens[i], min=0)
return tokens[-1]
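# A tiny worked example of the vocabulary-merge shift performed by `prior_preprocess` and
# undone by `prior_postprocess`: lyric ids keep their range while music ids are offset by
# `lyric_vocab_size`, so both can share a single embedding table. All sizes are invented.
import torch

lyric_vocab_size = 80                                # stand-in value; the real one comes from the config
lyric_tokens = torch.tensor([[3, 7]])
music_tokens = torch.tensor([[5, 1]])
merged = torch.cat([lyric_tokens, music_tokens + lyric_vocab_size], dim=1)
print(merged)                                        # tensor([[ 3,  7, 85, 81]])
recovered = torch.clamp(merged[:, lyric_tokens.shape[1]:] - lyric_vocab_size, min=0)
print(recovered)                                     # tensor([[5, 1]]) -- only the music tokens are kept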
def embed_tokens(self, music_tokens_conds):
"""
Embeds the upper level music tokens and upsamples them to provide as audio conditioning.
"""
music_tokens_conds = music_tokens_conds[:self.cond_level + 1]
audio_conditioning = None
for music_tokens_cond, conditioner_block in reversed(list(zip(music_tokens_conds, [self.conditioner_blocks]))):
audio_conditioning = conditioner_block(music_tokens_cond, audio_conditioning)
return audio_conditioning
def encode(self, hidden_states, start_level=None, end_level=None, bs_chunks=1):
"""
Encodes the hidden states (raw audio) using the VQVAE's encoder. Returns latent_states.
"""
if start_level is None:
start_level = self.level
if end_level is None:
end_level = self.levels
with torch.no_grad():
latent_states = self.vqvae_encoder(hidden_states, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks)
return latent_states
def decode(self, music_tokens, start_level=None, end_level=None, bs_chunks=1):
"""
Upsamples the sequence of codebook vectors to raw audio.
"""
if start_level is None:
start_level = self.level
if end_level is None:
end_level = self.levels
with torch.no_grad():
output = self.vqvae_decoder(music_tokens, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks)
return output
def get_cond(self, music_tokens_conds, metadata):
"""
Converts the input tokens to input_embeddings. Splits the lyrics from the rest of the metadata. Lyric tokens
can be None.
"""
if metadata is not None:
n_labels = metadata.shape[1] - self.nb_relevant_lyric_tokens
metadata, lyric_tokens = (metadata[:, :n_labels], metadata[:, n_labels:])
else:
metadata, lyric_tokens = (None, None)
metadata_conditioning, metadata_pos = self.metadata_embedding(metadata) if self.metadata_conditioning else (None, None)
audio_conditioning = self.embed_tokens(music_tokens_conds) if self.audio_conditioning else metadata_pos
return (audio_conditioning, metadata_conditioning, lyric_tokens)
def sample(self, n_samples, music_tokens=None, music_tokens_conds=None, metadata=None, temp=1.0, top_k=0, top_p=0.0, chunk_size=None, sample_tokens=None):
"""
Ancestral/primed sampling of a window of tokens using the provided conditioning and metadata.
Args:
n_samples (`int`):
Number of samples to generate.
music_tokens (`list[torch.LongTensor]`, *optional*):
Previously generated tokens at the current level. Used as context for the generation.
music_tokens_conds (`list[torch.FloatTensor]`, *optional*):
Upper-level music tokens generated by the previous prior model. Is `None` if the generation is not
conditioned on the upper-level tokens.
metadata (`list[torch.LongTensor]`, *optional*):
List containing the metadata tensor with the artist, genre and the lyric tokens.
temp (`float`, *optional*, defaults to 1.0):
Sampling temperature.
top_k (`int`, *optional*, defaults to 0):
Top k probabilities used for filtering.
top_p (`float`, *optional*, defaults to 0.0):
Top p probabilities used for filtering.
chunk_size (`int`, *optional*):
Size of the chunks used to prepare the cache of the transformer.
sample_tokens (`int`, *optional*):
Number of tokens to sample.
"""
no_past_context = music_tokens is None or music_tokens.shape[1] == 0
name = {True: 'Ancestral', False: 'Primed'}[no_past_context]
logger.info(f'{name} sampling {n_samples} samples with temp={temp}, top_k={top_k}, top_p={top_p}')
with torch.no_grad():
audio_conditioning, metadata_conditioning, lyric_tokens = self.get_cond(music_tokens_conds, metadata)
if self.is_encoder_decoder:
if no_past_context:
lyric_and_music_tokens, audio_conditioning = self.prior_preprocess([lyric_tokens], [None, audio_conditioning])
else:
lyric_and_music_tokens, audio_conditioning = self.prior_preprocess([lyric_tokens, music_tokens], [None, audio_conditioning])
if sample_tokens is not None:
sample_tokens += self.nb_relevant_lyric_tokens
music_tokens = self.prior.primed_sample(n_samples, lyric_and_music_tokens, audio_conditioning, metadata_conditioning, temp=temp, top_k=top_k, top_p=top_p, chunk_size=chunk_size, sample_tokens=sample_tokens)
music_tokens = self.prior_postprocess(music_tokens)
else:
last_encoder_hidden_states = self.get_encoder_states(lyric_tokens, sample=True)
if no_past_context:
music_tokens = self.prior.sample(n_samples, audio_conditioning, metadata_conditioning, last_encoder_hidden_states, temp=temp, top_k=top_k, top_p=top_p, sample_tokens=sample_tokens)
else:
music_tokens = self.prior.primed_sample(n_samples, music_tokens, audio_conditioning, metadata_conditioning, last_encoder_hidden_states, temp=temp, top_k=top_k, top_p=top_p, chunk_size=chunk_size, sample_tokens=sample_tokens)
return music_tokens
def get_encoder_states(self, lyric_tokens, sample=False):
"""
Retrieve the last hidden_states of the lyric encoder that will be attended to by the decoder. Forwards through
the lyric encoder.
"""
if self.nb_relevant_lyric_tokens != 0 and self.lyric_conditioning:
if sample:
self.encoder = self.encoder.to(lyric_tokens.device)
lyric_acts = self.encoder(lyric_tokens, None, None, None)
lyric_acts = self.encoder.proj_in(lyric_acts)
last_encoder_hidden_states = self.encoder.final_layer_norm(lyric_acts)
else:
last_encoder_hidden_states = None
return last_encoder_hidden_states
def get_encoder_loss(self, last_encoder_hidden_states, target_lyrics):
"""
Computes the loss for the lyric encoder: next lyric token prediction.
"""
if self.lyric_conditioning:
last_encoder_hidden_states = self.encoder.lm_head(last_encoder_hidden_states)
encoder_loss = nn.functional.cross_entropy(last_encoder_hidden_states.view(-1, self.encoder_dim), target_lyrics.view(-1)) / np.log(2.0)
else:
encoder_loss = torch.tensor(0.0, device=last_encoder_hidden_states.device)
return encoder_loss
def forward_tokens(self, music_tokens, music_tokens_conds=[], metadata=None, get_preds=False, get_attn_weights=False):
"""
Applies a forward pass using the conditioning tokens. Different from the classic forward as it does not use the
vqvae's encoding layers.
"""
if get_attn_weights:
self.prior.transformer.set_record_attn(get_attn_weights)
audio_conditioning, metadata_conditioning, lyric_tokens = self.get_cond(music_tokens_conds, metadata)
if self.is_encoder_decoder:
tokens, audio_conditioning = self.prior_preprocess([lyric_tokens, music_tokens], [None, audio_conditioning])
(encoder_loss, next_token_prediction_loss), preds = self.prior(tokens, audio_conditioning, metadata_conditioning, get_sep_loss=True, get_preds=get_preds)
else:
last_encoder_hidden_states = self.get_encoder_states(lyric_tokens)
encoder_loss = self.get_encoder_loss(last_encoder_hidden_states, lyric_tokens)
next_token_prediction_loss, preds = self.prior(music_tokens, audio_conditioning, metadata_conditioning, last_encoder_hidden_states, get_preds=get_preds)
loss = self.encoder_loss_fraction * encoder_loss * self.nb_relevant_lyric_tokens / self.total_loss_dims
loss += next_token_prediction_loss * self.next_token_prediction_loss_dims / self.total_loss_dims
metrics = {'bpd': next_token_prediction_loss.detach().clone(), 'encoder_loss': encoder_loss.detach().clone(), 'next_token_prediction_loss': next_token_prediction_loss.detach().clone()}
if get_preds:
metrics['preds'] = preds.detach().clone()
if get_attn_weights:
saved_attn_weights = self.prior.transformer.saved_attn_weights
self.prior.transformer.set_record_attn(False)
return saved_attn_weights
else:
return (loss, metrics)
def forward(self, hidden_states: torch.Tensor, metadata: Optional[list[torch.LongTensor]], decode: Optional[bool]=False, get_preds: Optional[bool]=False) -> list[torch.Tensor]:
"""
Encodes the hidden states using the `vqvae` encoder and then predicts the next token in the `forward_tokens`
function. The loss is the sum of the `encoder` loss and the `decoder` loss.
Args:
hidden_states (`torch.Tensor`):
Hidden states which should be raw audio
metadata (`list[torch.LongTensor]`, *optional*):
List containing the metadata conditioning tensor with the lyric and the metadata tokens.
decode (`bool`, *optional*, defaults to `False`):
Whether or not to decode the encoded music tokens.
get_preds (`bool`, *optional*, defaults to `False`):
Whether or not to return the actual predictions of the model.
"""
batch_size = hidden_states.shape[0]
music_tokens, *music_tokens_conds = self.encode(hidden_states, bs_chunks=batch_size)
loss, metrics = self.forward_tokens(music_tokens=music_tokens, music_tokens_conds=music_tokens_conds, metadata=metadata, get_preds=get_preds)
if decode:
dequantised_states = self.decode([music_tokens, *music_tokens_conds])
else:
dequantised_states = None
return (dequantised_states, loss, metrics)
|
class JukeboxPrior(PreTrainedModel):
'''
The JukeboxPrior class, which is a wrapper around the various conditioning modules and the transformer. JukeboxPrior
can be seen as a language model trained on music. It models the next `music token` prediction task. If a (lyric)
`encoder` is defined, it also models the `next character` prediction on the lyrics. It can be conditioned on timing,
artist, genre, lyrics and codes from lower-level priors.
Args:
config (`JukeboxPriorConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
level (`int`, *optional*):
Current level of the Prior. Should be in range `[0,nb_priors]`.
nb_priors (`int`, *optional*, defaults to 3):
Total number of priors.
vqvae_encoder (`Callable`, *optional*):
Encoding method of the VQVAE encoder used in the forward pass of the model. Passing functions instead of
the vqvae module to avoid getting the parameters.
vqvae_decoder (`Callable`, *optional*):
Decoding method of the VQVAE decoder used in the forward pass of the model. Passing functions instead of
the vqvae module to avoid getting the parameters.
'''
def _init_weights(self, module):
pass
def __init__(self, config: JukeboxPriorConfig, level=None, nb_priors=3, vqvae_encoder=None, vqvae_decoder=None):
pass
def get_metadata(self, labels, start, total_length, offset, get_indices=False):
pass
def set_metadata_lyric_tokens(self, labels):
'''
Processes the full labels to only retrieve the relevant lyric tokens and keep the metadata conditioning tokens.
'''
pass
def get_music_tokens_conds(self, music_tokens, start, end):
'''
Extracts current level's conditioning music tokens.
'''
pass
def prior_preprocess(self, tokens, conds):
'''
Shifts the input tokens to account for the dictionary merge. The `embed_dim_shift` gives the amount by which the
music tokens should be shifted. It is equal to `lyric_vocab_size`.
'''
pass
def prior_postprocess(self, tokens):
'''
Shifts back the input tokens if the model uses an encoder decoder architecture. As the embedding layer is
shared, `prior_embed_dim_shift` shifts the music token ids by `lyric_vocab_size`. Only returns the music
tokens.
'''
pass
def embed_tokens(self, music_tokens_conds):
'''
Embeds the upper level music tokens and upsamples them to provide as audio conditioning.
'''
pass
def encode(self, hidden_states, start_level=None, end_level=None, bs_chunks=1):
'''
Encodes the hidden states (raw audio) using the VQVAE's encoder. Returns latent_states.
'''
pass
def decode(self, music_tokens, start_level=None, end_level=None, bs_chunks=1):
'''
Upsamples the sequence of codebook vectors to raw audio.
'''
pass
def get_cond(self, music_tokens_conds, metadata):
'''
Converts the input tokens to input_embeddings. Splits the lyrics from the rest of the metadata. Lyric tokens
can be None.
'''
pass
def sample(self, n_samples, music_tokens=None, music_tokens_conds=None, metadata=None, temp=1.0, top_k=0, top_p=0.0, chunk_size=None, sample_tokens=None):
'''
Ancestral/primed sampling of a window of tokens using the provided conditioning and metadata.
Args:
n_samples (`int`):
Number of samples to generate.
music_tokens (`list[torch.LongTensor]`, *optional*):
Previously generated tokens at the current level. Used as context for the generation.
music_tokens_conds (`list[torch.FloatTensor]`, *optional*):
Upper-level music tokens generated by the previous prior model. Is `None` if the generation is not
conditioned on the upper-level tokens.
metadata (`list[torch.LongTensor]`, *optional*):
List containing the metadata tensor with the artist, genre and the lyric tokens.
temp (`float`, *optional*, defaults to 1.0):
Sampling temperature.
top_k (`int`, *optional*, defaults to 0):
Top k probabilities used for filtering.
top_p (`float`, *optional*, defaults to 0.0):
Top p probabilities used for filtering.
chunk_size (`int`, *optional*):
Size of the chunks used to prepare the cache of the transformer.
sample_tokens (`int`, *optional*):
Number of tokens to sample.
'''
pass
def get_encoder_states(self, lyric_tokens, sample=False):
'''
Retrieve the last hidden_states of the lyric encoder that will be attended to by the decoder. Forwards through
the lyric encoder.
'''
pass
def get_encoder_loss(self, last_encoder_hidden_states, target_lyrics):
'''
Computes the loss for the lyric encoder: next lyric token prediction.
'''
pass
def forward_tokens(self, music_tokens, music_tokens_conds=[], metadata=None, get_preds=False, get_attn_weights=False):
'''
Applies a forward pass using the conditioning tokens. Different from the classic forward as it does not use the
vqvae's encoding layers.
'''
pass
def forward(self, hidden_states: torch.Tensor, metadata: Optional[list[torch.LongTensor]], decode: Optional[bool]=False, get_preds: Optional[bool]=False) -> list[torch.Tensor]:
'''
Encodes the hidden states using the `vqvae` encoder and then predicts the next token in the `forward_tokens`
function. The loss is the sum of the `encoder` loss and the `decoder` loss.
Args:
hidden_states (`torch.Tensor`):
Hidden states which should be raw audio
metadata (`list[torch.LongTensor]`, *optional*):
List containing the metadata conditioning tensor with the lyric and the metadata tokens.
decode (`bool`, *optional*, defaults to `False`):
Whether or not to decode the encoded music tokens.
get_preds (`bool`, *optional*, defaults to `False`):
Whether or not to return the actual predictions of the model.
'''
pass
| 17
| 14
| 28
| 2
| 21
| 6
| 4
| 0.34
| 1
| 17
| 9
| 0
| 16
| 29
| 16
| 145
| 492
| 46
| 334
| 113
| 298
| 115
| 204
| 94
| 187
| 11
| 2
| 3
| 61
|
1,784
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxRangeEmbedding
|
from torch import nn
import torch.nn.functional as F
import torch
class JukeboxRangeEmbedding(nn.Module):
"""
The `JukeboxRangeEmbedding` interpolates the given [pos_start, pos_end] to obtain an equivalent of a time positional
embedding of length `n_ctx`.
Binning process: for each pos in the position tensor, find its bin in [start, end), mapped via
[start, end) -> [0, 1) -> [0, bins) -> floor -> [0, ..., bins - 1]. NOTE: the interval is open-ended on the right,
so start <= pos < end, not pos <= end.
"""
def __init__(self, n_time, embed_dim, range, out_width, clamp=False):
super().__init__()
self.n_time = n_time
self.embed_dim = embed_dim
self.emb = nn.Embedding(embed_dim, out_width)
self.pos_min, self.pos_max = range
self.clamp = clamp
def forward(self, pos_start, pos_end=None):
if not len(pos_start.shape) == 2:
raise TypeError(f'Expected shape with 2 dims, got {pos_start.shape}')
if not (self.pos_min <= pos_start).all() and (pos_start < self.pos_max).all():
raise TypeError(f'Range is [{self.pos_min},{self.pos_max}), got {pos_start}')
pos_start = pos_start.float()
if pos_end is not None:
if self.clamp:
pos_end = pos_end.clamp(self.pos_min, self.pos_max)
pos_end = pos_end.float()
n_time = self.n_time
if n_time != 1:
interpolation = torch.arange(0, n_time, dtype=torch.float, device=pos_start.device).view(1, n_time) / n_time
position = pos_start + (pos_end - pos_start) * interpolation
else:
position = pos_start
normalised_position = (position - self.pos_min) / (self.pos_max - self.pos_min)
bins_ = (self.embed_dim * normalised_position).floor().long().detach()
return self.emb(bins_)
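# A standalone numeric illustration of the binning rule above: positions in [pos_min, pos_max)
# are normalised to [0, 1) and floored into `embed_dim` bins, exactly as `forward` does before
# the embedding lookup. The range and bin count are arbitrary example values.
import torch

pos_min, pos_max, embed_dim = 0.0, 100.0, 10
position = torch.tensor([[0.0, 25.0, 99.9]])
normalised_position = (position - pos_min) / (pos_max - pos_min)
bins_ = (embed_dim * normalised_position).floor().long()
print(bins_)    # tensor([[0, 2, 9]]) -- each bin indexes one row of the embedding table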
|
class JukeboxRangeEmbedding(nn.Module):
'''
The `JukeboxRangeEmbedding` interpolates the given [pos_start, pos_end] to obtain an equivalent of a time positional
embedding of length `n_ctx`.
Binning process: for each pos in the position tensor, find its bin in [start, end), mapped via
[start, end) -> [0, 1) -> [0, bins) -> floor -> [0, ..., bins - 1]. NOTE: the interval is open-ended on the right,
so start <= pos < end, not pos <= end.
'''
def __init__(self, n_time, embed_dim, range, out_width, clamp=False):
pass
def forward(self, pos_start, pos_end=None):
pass
| 3
| 1
| 18
| 2
| 14
| 2
| 4
| 0.38
| 1
| 3
| 0
| 0
| 2
| 6
| 2
| 12
| 46
| 6
| 29
| 13
| 26
| 11
| 26
| 13
| 23
| 6
| 1
| 2
| 7
|
1,785
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxResConv1DBlock
|
from torch import nn
class JukeboxResConv1DBlock(nn.Module):
def __init__(self, config, conv_width, depth=1, res_scale=1.0):
super().__init__()
hidden_dim = config.res_convolution_multiplier * conv_width
dilation = config.res_dilation_growth_rate ** depth
padding = dilation
self.res_scale = res_scale
self.activation = nn.ReLU()
self.conv1d_1 = nn.Conv1d(conv_width, hidden_dim, 3, 1, padding, dilation)
self.conv1d_2 = nn.Conv1d(hidden_dim, conv_width, 1, 1, 0)
def forward(self, hidden_states):
residuals = hidden_states
hidden_states = self.activation(hidden_states)
hidden_states = self.conv1d_1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.conv1d_2(hidden_states)
return residuals + self.res_scale * hidden_states
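# Quick standalone check of the padding choice above: with kernel_size=3 and padding equal to
# the dilation, the dilated convolution preserves the sequence length, which is what makes the
# residual addition valid. Channel counts and the sequence length are arbitrary.
import torch
from torch import nn

dilation = 4
conv = nn.Conv1d(8, 16, kernel_size=3, padding=dilation, dilation=dilation)
print(conv(torch.randn(1, 8, 50)).shape)    # torch.Size([1, 16, 50]) -- same length as the input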
|
class JukeboxResConv1DBlock(nn.Module):
def __init__(self, config, conv_width, depth=1, res_scale=1.0):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 9
| 1
| 8
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 4
| 2
| 12
| 19
| 2
| 17
| 11
| 14
| 0
| 17
| 11
| 14
| 1
| 1
| 0
| 2
|
1,786
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxResnet1D
|
from torch import nn
import math
class JukeboxResnet1D(nn.Module):
def __init__(self, config, conv_width, n_depth, reverse_dilation=False):
super().__init__()
self.dilation_cycle = config.res_dilation_cycle
res_scale = 1.0 if not config.conv_res_scale else 1.0 / math.sqrt(n_depth)
blocks = []
for depth in range(n_depth):
block_depth = depth if self.dilation_cycle is None else depth % self.dilation_cycle
blocks.append(JukeboxResConv1DBlock(config, conv_width, block_depth, res_scale))
if reverse_dilation:
blocks = blocks[::-1]
self.resnet_block = nn.ModuleList(blocks)
def forward(self, hidden_states):
for block in self.resnet_block:
hidden_states = block(hidden_states)
return hidden_states
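# Standalone illustration of the dilation-cycle logic above: with res_dilation_cycle=4 the
# block depths repeat 0, 1, 2, 3, 0, 1, ... so the per-block dilation (growth_rate ** depth)
# keeps cycling instead of growing without bound. The numbers are example values only.
dilation_cycle, n_depth, growth_rate = 4, 8, 3
block_depths = [depth if dilation_cycle is None else depth % dilation_cycle for depth in range(n_depth)]
dilations = [growth_rate ** depth for depth in block_depths]
print(block_depths)    # [0, 1, 2, 3, 0, 1, 2, 3]
print(dilations)       # [1, 3, 9, 27, 1, 3, 9, 27]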
|
class JukeboxResnet1D(nn.Module):
def __init__(self, config, conv_width, n_depth, reverse_dilation=False):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 9
| 1
| 8
| 0
| 4
| 0
| 1
| 3
| 1
| 0
| 2
| 2
| 2
| 12
| 19
| 3
| 16
| 10
| 13
| 0
| 16
| 10
| 13
| 5
| 1
| 1
| 7
|
1,787
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/modeling_jukebox.py
|
transformers.models.deprecated.jukebox.modeling_jukebox.JukeboxVQVAE
|
from torch import nn
from .configuration_jukebox import ATTENTION_PATTERNS, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig
import numpy as np
from ....modeling_utils import PreTrainedModel
import torch
from ....utils import add_start_docstrings, logging
import torch.nn.functional as F
@add_start_docstrings('The Hierarchical VQ-VAE model used in Jukebox. This model follows the Hierarchical VQVAE paper from [Will Williams, Sam\nRinger, Tom Ash, John Hughes, David MacLeod, Jamie Dougherty](https://huggingface.co/papers/2002.08111).\n\n ', JUKEBOX_START_DOCSTRING)
class JukeboxVQVAE(PreTrainedModel):
config: JukeboxVQVAEConfig
base_model_prefix = 'vqvae'
def _init_weights(self, module):
if isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_scale)
elif isinstance(module, JukeboxConv1D):
if self.config.zero_out:
module.weight.data.zero_()
else:
module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_scale)
elif isinstance(module, JukeboxResConv1DBlock) and self.config.zero_out:
module.conv1d_2.weight.data.zero_()
module.conv1d_2.bias.data.zero_()
if isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def __init__(self, config: JukeboxVQVAEConfig):
super().__init__(config)
downs_t = config.res_downs_t
strides_t = config.res_strides_t
if not config.sample_length:
downsamples = [stride ** down for stride, down in zip(strides_t, downs_t)]
top_raw_to_tokens = np.prod(downsamples)
config.sample_length = config.sample_length_in_seconds * config.sampling_rate // top_raw_to_tokens * top_raw_to_tokens
config.sample_length = config.sample_length.astype(int)
self.nb_discrete_codes = config.nb_discrete_codes
self.commit = config.commit
self.sample_length = config.sample_length
self.downsamples = [stride ** down for stride, down in zip(strides_t, downs_t)]
self.hop_lengths = np.cumprod(self.downsamples)
self.levels = levels = config.levels
self.music_tokens_shapes = [int(self.sample_length // self.hop_lengths[-level - 1]) for level in range(levels)]
self.multipliers = config.multipliers if config.multipliers is not None else [1] * levels
self.encoders = nn.ModuleList()
self.decoders = nn.ModuleList()
for level in range(levels):
width = config.res_conv_width * self.multipliers[level]
depth = config.res_conv_depth * self.multipliers[level]
self.encoders.append(JukeboxEncoder(config, width, depth, level + 1, downs_t[:level + 1], strides_t[:level + 1]))
self.decoders.append(JukeboxDecoder(config, width, depth, level + 1, downs_t[:level + 1], strides_t[:level + 1]))
self.bottleneck = JukeboxBottleneck(config, levels)
def _decode(self, music_tokens, start_level=0, end_level=None):
if end_level is None:
end_level = self.levels
latent_states = self.bottleneck.decode(music_tokens, start_level=start_level, end_level=end_level)
decoder, dequantised_state = (self.decoders[start_level], latent_states[0:1])
dequantised_state = decoder(dequantised_state, all_levels=False)
dequantised_state = dequantised_state.permute(0, 2, 1)
return dequantised_state
def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1) -> torch.Tensor:
"""
Transforms the input `music_tokens` to their `raw_audio` representation.
Args:
music_tokens (`torch.LongTensor`):
Tensor of music tokens which will be decoded to raw audio by using the codebook. Each music token
should be an index to a corresponding `code` vector in the codebook.
start_level (`int`, *optional*):
Level at which the decoding process will start. Defaults to 0.
end_level (`int`, *optional*):
Level at which the decoding process will end. Defaults to `None`, which means the last level.
bs_chunks (int, *optional*):
Number of chunks to process at the same time.
"""
token_chunks = [torch.chunk(token, bs_chunks, dim=0) for token in music_tokens]
dequantised_states = []
for i in range(bs_chunks):
music_tokens_i = [chunks[i] for chunks in token_chunks]
dequantised_state = self._decode(music_tokens_i, start_level=start_level, end_level=end_level)
dequantised_states.append(dequantised_state)
return torch.cat(dequantised_states, dim=0)
def _encode(self, raw_audio, start_level=0, end_level=None):
if end_level is None:
end_level = self.levels
input_audio = raw_audio.permute(0, 2, 1).float()
latent_states = []
for level in range(self.levels):
encoder = self.encoders[level]
latent_state = encoder(input_audio)
latent_states.append(latent_state[-1])
music_tokens = self.bottleneck.encode(latent_states)
return music_tokens[start_level:end_level]
def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1):
"""
Transforms the `input_audio` to a discrete representation made out of `music_tokens`.
Args:
input_audio (`torch.Tensor`):
Raw audio which will be encoded to its discrete representation using the codebook. The closest `code`
from the codebook will be computed for each sequence of samples.
start_level (`int`, *optional*, defaults to 0):
Level at which the encoding process will start. Defaults to 0.
end_level (`int`, *optional*):
Level at which the encoding process will end. Defaults to `None`, which means the last level.
bs_chunks (int, *optional*, defaults to 1):
Number of chunks of raw audio to process at the same time.
"""
audio_chunks = torch.chunk(input_audio, bs_chunks, dim=0)
music_tokens_list = []
for chunk_i in audio_chunks:
music_tokens_i = self._encode(chunk_i, start_level=start_level, end_level=end_level)
music_tokens_list.append(music_tokens_i)
music_tokens = [torch.cat(music_tokens_level, dim=0) for music_tokens_level in zip(*music_tokens_list)]
return music_tokens
def sample(self, n_samples):
music_tokens = [torch.randint(0, self.nb_discrete_codes, size=(n_samples, *music_tokens_shape), device='cpu') for music_tokens_shape in self.music_tokens_shapes]
return self.decode(music_tokens)
def forward(self, raw_audio: torch.FloatTensor) -> tuple[torch.Tensor, torch.Tensor]:
"""
Forward pass of the VQ-VAE, encodes the `raw_audio` to latent states, which are then decoded for each level.
The commit loss, which ensures that the encoder's computed embeddings are close to the codebook vectors, is
computed.
Args:
raw_audio (`torch.FloatTensor`):
Audio input which will be encoded and decoded.
Returns:
`tuple[torch.Tensor, torch.Tensor]`
Example:
```python
>>> from transformers import JukeboxVQVAE, set_seed
>>> import torch
>>> model = JukeboxVQVAE.from_pretrained("openai/jukebox-1b-lyrics").eval()
>>> set_seed(0)
>>> zs = [torch.randint(100, (4, 1))]
>>> model.decode(zs).shape
torch.Size([4, 8, 1])
```
"""
input_audio = raw_audio.permute(0, 2, 1).float()
latent_states = []
for level in range(self.levels):
encoder = self.encoders[level]
latent_state = encoder(input_audio)
latent_states.append(latent_state[-1])
_, music_tokens, commit_losses, _ = self.bottleneck(latent_states)
dequantised_states = []
for level in range(self.levels):
decoder = self.decoders[level]
dequantised_state = decoder(music_tokens[level:level + 1], all_levels=False)
dequantised_states.append(dequantised_state.permute(0, 2, 1))
commit_loss = sum(commit_losses)
loss = self.commit * commit_loss
return (dequantised_states, loss)
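# A worked example of the hop-length arithmetic in `__init__` above, using assumed values
# res_downs_t=(3, 2, 2) and res_strides_t=(2, 2, 2) (the usual three-level Jukebox setup) and
# an invented sample length. It shows how many music tokens each VQ-VAE level produces.
import numpy as np

downs_t, strides_t, levels = (3, 2, 2), (2, 2, 2), 3
sample_length = 1_048_576                                    # hypothetical raw-audio length in samples
downsamples = [stride ** down for stride, down in zip(strides_t, downs_t)]
hop_lengths = np.cumprod(downsamples)                        # [8, 32, 128]
music_tokens_shapes = [int(sample_length // hop_lengths[-level - 1]) for level in range(levels)]
print(music_tokens_shapes)                                   # [8192, 32768, 131072]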
|
@add_start_docstrings('The Hierarchical VQ-VAE model used in Jukebox. This model follows the Hierarchical VQVAE paper from [Will Williams, Sam\nRinger, Tom Ash, John Hughes, David MacLeod, Jamie Dougherty](https://huggingface.co/papers/2002.08111).\n\n ', JUKEBOX_START_DOCSTRING)
class JukeboxVQVAE(PreTrainedModel):
def _init_weights(self, module):
pass
def __init__(self, config: JukeboxVQVAEConfig):
pass
def _decode(self, music_tokens, start_level=0, end_level=None):
pass
def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1) -> torch.Tensor:
'''
Transforms the input `music_tokens` to their `raw_audio` representation.
Args:
music_tokens (`torch.LongTensor`):
Tensor of music tokens which will be decoded to raw audio by using the codebook. Each music token
should be an index to a corresponding `code` vector in the codebook.
start_level (`int`, *optional*):
Level at which the decoding process will start. Defaults to 0.
end_level (`int`, *optional*):
Level at which the decoding process will end. Defaults to `None`, which means the last level.
bs_chunks (int, *optional*):
Number of chunks to process at the same time.
'''
pass
def _encode(self, raw_audio, start_level=0, end_level=None):
pass
def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1):
'''
Transforms the `input_audio` to a discrete representation made out of `music_tokens`.
Args:
input_audio (`torch.Tensor`):
Raw audio which will be encoded to its discrete representation using the codebook. The closest `code`
from the codebook will be computed for each sequence of samples.
start_level (`int`, *optional*, defaults to 0):
Level at which the encoding process will start. Defaults to 0.
end_level (`int`, *optional*):
Level at which the encoding process will end. Defaults to `None`, which means the last level.
bs_chunks (int, *optional*, defaults to 1):
Number of chunks of raw audio to process at the same time.
'''
pass
def sample(self, n_samples):
pass
def forward(self, raw_audio: torch.FloatTensor) -> tuple[torch.Tensor, torch.Tensor]:
'''
Forward pass of the VQ-VAE, encodes the `raw_audio` to latent states, which are then decoded for each level.
The commit loss, which ensures that the encoder's computed embeddings are close to the codebook vectors, is
computed.
Args:
raw_audio (`torch.FloatTensor`):
Audio input which will be encoded and decoded.
Returns:
`tuple[torch.Tensor, torch.Tensor]`
Example:
```python
>>> from transformers import JukeboxVQVAE, set_seed
>>> import torch
>>> model = JukeboxVQVAE.from_pretrained("openai/jukebox-1b-lyrics").eval()
>>> set_seed(0)
>>> zs = [torch.randint(100, (4, 1))]
>>> model.decode(zs).shape
torch.Size([4, 8, 1])
```
'''
pass
| 10
| 3
| 22
| 2
| 13
| 6
| 3
| 0.47
| 1
| 11
| 6
| 0
| 8
| 11
| 8
| 137
| 183
| 24
| 109
| 58
| 100
| 51
| 95
| 58
| 86
| 7
| 2
| 2
| 24
|
1,788
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/jukebox/tokenization_jukebox.py
|
transformers.models.deprecated.jukebox.tokenization_jukebox.JukeboxTokenizer
|
from typing import Any, Optional, Union
import unicodedata
from json.encoder import INFINITY
import json
from ....utils.generic import _is_numpy
from ....utils import TensorType, is_torch_available, logging
import regex
import os
from ....tokenization_utils_base import BatchEncoding
from ....tokenization_utils import AddedToken, PreTrainedTokenizer
import numpy as np
import re
class JukeboxTokenizer(PreTrainedTokenizer):
"""
Constructs a Jukebox tokenizer. Jukebox can be conditioned on 3 different inputs :
- Artists, unique ids are associated to each artist from the provided dictionary.
- Genres, unique ids are associated to each genre from the provided dictionary.
- Lyrics, character based tokenization. Must be initialized with the list of characters that are inside the
vocabulary.
This tokenizer does not require training. It should be able to process a different number of inputs,
as the conditioning of the model can be done on the three different queries. If None is provided, default values will be used,
depending on the number of genres on which the model should be conditioned (`n_genres`).
```python
>>> from transformers import JukeboxTokenizer
>>> tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
>>> tokenizer("Alan Jackson", "Country Rock", "old town road")["input_ids"]
[tensor([[ 0, 0, 0, 6785, 546, 41, 38, 30, 76, 46, 41, 49,
40, 76, 44, 41, 27, 30]]), tensor([[ 0, 0, 0, 145, 0]]), tensor([[ 0, 0, 0, 145, 0]])]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
If nothing is provided, the genres and the artist will either be selected randomly or set to None
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
However, the code does not allow that and only supports composing from various genres.
Args:
artists_file (`str`):
Path to the vocabulary file which contains a mapping between artists and ids. The default file supports
both "v2" and "v3"
genres_file (`str`):
Path to the vocabulary file which contain a mapping between genres and ids.
lyrics_file (`str`):
Path to the vocabulary file which contains the accepted characters for the lyrics tokenization.
version (`list[str]`, *optional*, defaults to `["v3", "v2", "v2"]`):
List of the tokenizer versions. The `5b-lyrics`'s top level prior model was trained using `v3` instead of
`v2`.
n_genres (`int`, *optional*, defaults to 5):
Maximum number of genres to use for composition.
max_n_lyric_tokens (`int`, `optional`, defaults to 512):
Maximum number of lyric tokens to keep.
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, artists_file, genres_file, lyrics_file, version=['v3', 'v2', 'v2'], max_n_lyric_tokens=512, n_genres=5, unk_token='<|endoftext|>', **kwargs):
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
self.version = version
self.max_n_lyric_tokens = max_n_lyric_tokens
self.n_genres = n_genres
self._added_tokens_decoder = {0: unk_token}
with open(artists_file, encoding='utf-8') as vocab_handle:
self.artists_encoder = json.load(vocab_handle)
with open(genres_file, encoding='utf-8') as vocab_handle:
self.genres_encoder = json.load(vocab_handle)
with open(lyrics_file, encoding='utf-8') as vocab_handle:
self.lyrics_encoder = json.load(vocab_handle)
oov = '[^A-Za-z0-9.,:;!?\\-\'\\"()\\[\\] \\t\\n]+'
if len(self.lyrics_encoder) == 79:
oov = oov.replace("\\-'", "\\-+'")
self.out_of_vocab = regex.compile(oov)
self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
super().__init__(unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs)
@property
def vocab_size(self):
return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
def get_vocab(self):
return {'artists_encoder': self.artists_encoder, 'genres_encoder': self.genres_encoder, 'lyrics_encoder': self.lyrics_encoder}
def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
"""Converts the artist, genre and lyrics tokens to their index using the vocabulary.
The total_length, offset and duration have to be provided in order to select relevant lyrics and add padding to
the lyrics token sequence.
"""
artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
for genres in range(len(list_genres)):
list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
return (artists_id, list_genres, lyric_ids)
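# Small standalone illustration of the genre padding performed above: every genre id list is
# right-padded with -1 up to `n_genres`, so the conditioning vector has a fixed length.
# The ids below are invented values.
n_genres = 5                      # matches the default in __init__
genre_ids = [12, 40]              # ids for two genres
padded = genre_ids + [-1] * (n_genres - len(genre_ids))
print(padded)                     # [12, 40, -1, -1, -1]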
def _tokenize(self, lyrics):
"""
Converts a string into a sequence of tokens (string), using the tokenizer. Split in words for word-based
vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
Does NOT take care of added tokens. Only the lyrics are split into characters for the character-based vocabulary.
"""
return list(lyrics)
def tokenize(self, artist, genre, lyrics, **kwargs):
"""
Converts three strings into three sequences of tokens using the tokenizer
"""
artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
lyrics = self._tokenize(lyrics)
return (artist, genre, lyrics)
def prepare_for_tokenization(self, artists: str, genres: str, lyrics: str, is_split_into_words: bool=False) -> tuple[str, str, str, dict[str, Any]]:
"""
Performs any necessary transformations before tokenization.
Args:
artist (`str`):
The artist name to prepare. This will mostly lowercase the string.
genres (`str`):
The genre name to prepare. This will mostly lowercase the string.
lyrics (`str`):
The lyrics to prepare.
is_split_into_words (`bool`, *optional*, defaults to `False`):
Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
which it will tokenize. This is useful for NER or token classification.
"""
for idx in range(len(self.version)):
if self.version[idx] == 'v3':
artists[idx] = artists[idx].lower()
genres[idx] = [genres[idx].lower()]
else:
artists[idx] = self._normalize(artists[idx]) + '.v2'
genres[idx] = [self._normalize(genre) + '.v2' for genre in genres[idx].split('_')]
if self.version[0] == 'v2':
self.out_of_vocab = regex.compile('[^A-Za-z0-9.,:;!?\\-\'\\"()\\[\\] \\t\\n]+')
vocab = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'"()[] \t\n'
self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
self.vocab['<unk>'] = 0
self.n_vocab = len(vocab) + 1
self.lyrics_encoder = self.vocab
self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
self.lyrics_decoder[0] = ''
else:
self.out_of_vocab = regex.compile('[^A-Za-z0-9.,:;!?\\-+\'\\"()\\[\\] \\t\\n]+')
lyrics = self._run_strip_accents(lyrics)
lyrics = lyrics.replace('\\', '\n')
lyrics = (self.out_of_vocab.sub('', lyrics), [], [])
return (artists, genres, lyrics)
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize('NFD', text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == 'Mn':
continue
output.append(char)
return ''.join(output)
def _normalize(self, text: str) -> str:
"""
Normalizes the input text. This process is used for the genre and artist names.
Args:
text (`str`):
Artist or Genre string to normalize
"""
accepted = [chr(i) for i in range(ord('a'), ord('z') + 1)] + [chr(i) for i in range(ord('A'), ord('Z') + 1)] + [chr(i) for i in range(ord('0'), ord('9') + 1)] + ['.']
accepted = frozenset(accepted)
pattern = re.compile('_+')
text = ''.join([c if c in accepted else '_' for c in text.lower()])
text = pattern.sub('_', text).strip('_')
return text
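# Standalone sketch of the normalisation rule documented above (lower-case, replace anything
# outside [a-z0-9.] with "_", collapse runs of "_" and strip them at the ends), mirroring what
# `_normalize` does. The helper name and the example string are illustrative only.
import re

def normalize_name(text: str) -> str:
    accepted = set("abcdefghijklmnopqrstuvwxyz0123456789.")
    text = "".join(c if c in accepted else "_" for c in text.lower())
    return re.sub("_+", "_", text).strip("_")

print(normalize_name("Zac Brown Band"))    # zac_brown_band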
def convert_lyric_tokens_to_string(self, lyrics: list[str]) -> str:
return ' '.join(lyrics)
def convert_to_tensors(self, inputs, tensor_type: Optional[Union[str, TensorType]]=None, prepend_batch_axis: bool=False):
"""
Convert the inner content to tensors.
Args:
tensor_type (`str` or [`~utils.TensorType`], *optional*):
The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
unset, no modification is done.
prepend_batch_axis (`bool`, *optional*, defaults to `False`):
Whether or not to add the batch dimension during the conversion.
"""
if not isinstance(tensor_type, TensorType):
tensor_type = TensorType(tensor_type)
if tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')
import torch
as_tensor = torch.tensor
is_tensor = torch.is_tensor
else:
as_tensor = np.asarray
is_tensor = _is_numpy
try:
if prepend_batch_axis:
inputs = [inputs]
if not is_tensor(inputs):
inputs = as_tensor(inputs)
except:
raise ValueError("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' 'truncation=True' to have batched tensors with the same length.")
return inputs
def __call__(self, artist, genres, lyrics='', return_tensors='pt') -> BatchEncoding:
"""Convert the raw string to a list of token ids
Args:
artist (`str`):
Name of the artist.
genres (`str`):
List of genres that will be mixed to condition the audio
lyrics (`str`, *optional*, defaults to `""`):
Lyrics used to condition the generation
"""
input_ids = [0, 0, 0]
artist = [artist] * len(self.version)
genres = [genres] * len(self.version)
artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
attention_masks = [-INFINITY] * len(full_tokens[-1])
input_ids = [self.convert_to_tensors([input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors) for i in range(len(self.version))]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks})
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
"""
Saves the tokenizer's vocabulary dictionary to the provided save_directory.
Args:
save_directory (`str`):
A path to the directory in which to save the vocabulary files. An error is logged if it does not exist.
filename_prefix (`Optional[str]`, *optional*):
A prefix to add to the names of the files saved by the tokenizer.
"""
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
artists_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'])
with open(artists_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
genres_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'])
with open(genres_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
lyrics_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'])
with open(lyrics_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
return (artists_file, genres_file, lyrics_file)
def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
"""
Converts an index (integer) into a token (str) using the vocab.
Args:
artists_index (`int`):
Index of the artist in its corresponding dictionary.
genres_index (`Union[list[int], int]`):
Index of the genre in its corresponding dictionary.
lyric_index (`list[int]`):
List of character indices, which each correspond to a character.
"""
artist = self.artists_decoder.get(artists_index)
genres = [self.genres_decoder.get(genre) for genre in genres_index]
lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
return (artist, genres, lyrics)
|
class JukeboxTokenizer(PreTrainedTokenizer):
'''
Constructs a Jukebox tokenizer. Jukebox can be conditioned on 3 different inputs :
- Artists, unique ids are associated to each artist from the provided dictionary.
- Genres, unique ids are associated to each genre from the provided dictionary.
- Lyrics, character based tokenization. Must be initialized with the list of characters that are inside the
vocabulary.
This tokenizer does not require training. It should be able to process a different number of inputs,
as the conditioning of the model can be done on the three different queries. If None is provided, default values will be used.
Depending on the number of genres on which the model should be conditioned (`n_genres`).
```python
>>> from transformers import JukeboxTokenizer
>>> tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
>>> tokenizer("Alan Jackson", "Country Rock", "old town road")["input_ids"]
[tensor([[ 0, 0, 0, 6785, 546, 41, 38, 30, 76, 46, 41, 49,
40, 76, 44, 41, 27, 30]]), tensor([[ 0, 0, 0, 145, 0]]), tensor([[ 0, 0, 0, 145, 0]])]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
If nothing is provided, the genres and the artist will either be selected randomly or set to None
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
However the code does not allow that and only supports composing from various genres.
Args:
artists_file (`str`):
Path to the vocabulary file which contains a mapping between artists and ids. The default file supports
both "v2" and "v3"
genres_file (`str`):
Path to the vocabulary file which contain a mapping between genres and ids.
lyrics_file (`str`):
Path to the vocabulary file which contains the accepted characters for the lyrics tokenization.
version (`list[str]`, `optional`, defaults to `["v3", "v2", "v2"]`):
List of the tokenizer versions. The `5b-lyrics`'s top level prior model was trained using `v3` instead of
`v2`.
n_genres (`int`, `optional`, defaults to 5):
Maximum number of genres to use for composition.
max_n_lyric_tokens (`int`, `optional`, defaults to 512):
Maximum number of lyric tokens to keep.
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
'''
def __init__(self, artists_file, genres_file, lyrics_file, version=['v3', 'v2', 'v2'], max_n_lyric_tokens=512, n_genres=5, unk_token='<|endoftext|>', **kwargs):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
'''Converts the artist, genre and lyrics tokens to their index using the vocabulary.
The total_length, offset and duration have to be provided in order to select relevant lyrics and add padding to
the lyrics token sequence.
'''
pass
def _tokenize(self, lyrics):
'''
Converts a string into a sequence of tokens (string), using the tokenizer. Split in words for word-based
vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
Do NOT take care of added tokens. Only the lyrics are split into character for the character-based vocabulary.
'''
pass
def tokenize(self, artist, genre, lyrics, **kwargs):
'''
Converts three strings in a 3 sequence of tokens using the tokenizer
'''
pass
def prepare_for_tokenization(self, artists: str, genres: str, lyrics: str, is_split_into_words: bool=False) -> tuple[str, str, str, dict[str, Any]]:
'''
Performs any necessary transformations before tokenization.
Args:
artist (`str`):
The artist name to prepare. This will mostly lower the string
genres (`str`):
The genre name to prepare. This will mostly lower the string.
lyrics (`str`):
The lyrics to prepare.
is_split_into_words (`bool`, *optional*, defaults to `False`):
Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
which it will tokenize. This is useful for NER or token classification.
'''
pass
def _run_strip_accents(self, text):
'''Strips accents from a piece of text.'''
pass
def _normalize(self, text: str) -> str:
'''
Normalizes the input text. This process is for the genres and the artist
Args:
text (`str`):
Artist or Genre string to normalize
'''
pass
def convert_lyric_tokens_to_string(self, lyrics: list[str]) -> str:
pass
def convert_to_tensors(self, inputs, tensor_type: Optional[Union[str, TensorType]]=None, prepend_batch_axis: bool=False):
'''
Convert the inner content to tensors.
Args:
tensor_type (`str` or [`~utils.TensorType`], *optional*):
The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
unset, no modification is done.
prepend_batch_axis (`bool`, *optional*, defaults to `False`):
Whether or not to add the batch dimension during the conversion.
'''
pass
def __call__(self, artist, genres, lyrics='', return_tensors='pt') -> BatchEncoding:
'''Convert the raw string to a list of token ids
Args:
artist (`str`):
Name of the artist.
genres (`str`):
List of genres that will be mixed to condition the audio
lyrics (`str`, *optional*, defaults to `""`):
Lyrics used to condition the generation
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
'''
Saves the tokenizer's vocabulary dictionary to the provided save_directory.
Args:
save_directory (`str`):
A path to the directory where the vocabulary will be saved. It will be created if it doesn't exist.
filename_prefix (`Optional[str]`, *optional*):
A prefix to add to the names of the files saved by the tokenizer.
'''
pass
def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
'''
Converts an index (integer) into a token (str) using the vocab.
Args:
artists_index (`int`):
Index of the artist in its corresponding dictionary.
genres_index (`Union[list[int], int]`):
Index of the genre in its corresponding dictionary.
lyric_index (`list[int]`):
List of character indices, which each correspond to a character.
'''
pass
| 16 | 11 | 21 | 2 | 13 | 6 | 3 | 0.63 | 1 | 10 | 1 | 0 | 14 | 13 | 14 | 103 | 363 | 57 | 189 | 73 | 156 | 120 | 136 | 56 | 118 | 11 | 3 | 2 | 37
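The `_normalize` helper in the JukeboxTokenizer code above keeps only ASCII letters, digits and `.`, maps every other character to `_`, and collapses runs of underscores. A minimal standalone sketch of that rule (the function name `normalize_label` is illustrative, not part of transformers):

```python
import re

def normalize_label(text: str) -> str:
    # Keep lowercase letters, digits and '.'; map anything else to '_'.
    accepted = set("abcdefghijklmnopqrstuvwxyz0123456789.")
    text = "".join(c if c in accepted else "_" for c in text.lower())
    # Collapse runs of underscores and strip leading/trailing ones.
    return re.sub("_+", "_", text).strip("_")

print(normalize_label("Alan Jackson"))   # alan_jackson
print(normalize_label("Country Rock!"))  # country_rock
```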
| 1,789 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/configuration_mctct.py | transformers.models.deprecated.mctct.configuration_mctct.MCTCTConfig |
from ....configuration_utils import PretrainedConfig
class MCTCTConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MCTCTModel`]. It is used to instantiate an
M-CTC-T model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the M-CTC-T
[speechbrain/m-ctc-t-large](https://huggingface.co/speechbrain/m-ctc-t-large) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 8065):
Vocabulary size of the M-CTC-T model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MCTCTModel`].
hidden_size (`int`, *optional*, defaults to 1536):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 36):
Number of hidden layers in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 6144):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
attention_head_dim (`int`, *optional*, defaults to 384):
Dimensions of each attention head for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 920):
The maximum sequence length that this model might ever be used with (after log-mel spectrogram extraction).
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
layerdrop (`float`, *optional*, defaults to 0.3):
The probability of dropping an encoder layer during training. The default 0.3 value is used in the original
implementation.
hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
hidden_dropout_prob (`float`, *optional*, defaults to 0.3):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.3):
The dropout ratio for the attention probabilities.
pad_token_id (`int`, *optional*, defaults to 1):
The tokenizer index of the pad token.
bos_token_id (`int`, *optional*, defaults to 0):
The tokenizer index of the bos token.
eos_token_id (`int`, *optional*, defaults to 2):
The tokenizer index of the eos token.
conv_glu_dim (`int`, *optional*, defaults to 1):
The dimension of the output of the `Conv1dSubsampler` layer in which GLU is applied on. Though the original
Flashlight code uses the value of 2, here it's adapted to 1 due to transposition differences.
conv_dropout (`float`, *optional*, defaults to 0.3):
The probability of randomly dropping the `Conv1dSubsampler` layer during training.
num_conv_layers (`int`, *optional*, defaults to 1):
Number of convolution layers before applying transformer encoder layers.
conv_kernel (`Sequence[int]`, *optional*, defaults to `(7,)`):
The kernel size of the 1D convolution applied before transformer layers. `len(conv_kernel)` must be equal
to `num_conv_layers`.
conv_stride (`Sequence[int]`, *optional*, defaults to `(3,)`):
The stride length of the 1D convolution applied before transformer layers. `len(conv_stride)` must be equal
to `num_conv_layers`.
input_feat_per_channel (`int`, *optional*, defaults to 80):
Feature dimensions of the channels of the input to the Conv1D layer.
input_channels (`int`, *optional*, defaults to 1):
Number of input channels of the input to the Conv1D layer.
conv_channels (`list[int]`, *optional*):
Channel sizes of intermediate Conv1D layers.
ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
instance of [`MCTCTForCTC`].
ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
of [`MCTCTForCTC`].
Example:
```python
>>> from transformers import MCTCTConfig, MCTCTModel
>>> # Initializing a M-CTC-T mctct-large style configuration
>>> configuration = MCTCTConfig()
>>> # Initializing a model (with random weights) from the mctct-large style configuration
>>> model = MCTCTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'mctct'
def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-05, layerdrop=0.3, hidden_act='relu', initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction='sum', ctc_zero_infinity=False, **kwargs):
super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.num_attention_heads = num_attention_heads
self.attention_head_dim = attention_head_dim
self.max_position_embeddings = max_position_embeddings
self.layer_norm_eps = layer_norm_eps
self.layerdrop = layerdrop
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.conv_glu_dim = conv_glu_dim
self.conv_dropout = conv_dropout
self.num_conv_layers = num_conv_layers
self.input_feat_per_channel = input_feat_per_channel
self.input_channels = input_channels
self.conv_channels = conv_channels
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
self.conv_kernel = list(conv_kernel)
self.conv_stride = list(conv_stride)
if len(self.conv_kernel) != self.num_conv_layers:
raise ValueError(f'Configuration for convolutional module is incorrect. It is required that `len(config.conv_kernel)` == `config.num_conv_layers` but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, `config.num_conv_layers = {self.num_conv_layers}`.')
|
class MCTCTConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`MCTCTModel`]. It is used to instantiate an
M-CTC-T model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the M-CTC-T
[speechbrain/m-ctc-t-large](https://huggingface.co/speechbrain/m-ctc-t-large) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 8065):
Vocabulary size of the M-CTC-T model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MCTCTModel`].
hidden_size (`int`, *optional*, defaults to 1536):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 36):
Number of hidden layers in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 6144):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
attention_head_dim (`int`, *optional*, defaults to 384):
Dimensions of each attention head for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 920):
The maximum sequence length that this model might ever be used with (after log-mel spectrogram extraction).
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
layerdrop (`float`, *optional*, defaults to 0.3):
The probability of dropping an encoder layer during training. The default 0.3 value is used in the original
implementation.
hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
hidden_dropout_prob (`float`, *optional*, defaults to 0.3):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.3):
The dropout ratio for the attention probabilities.
pad_token_id (`int`, *optional*, defaults to 1):
The tokenizer index of the pad token.
bos_token_id (`int`, *optional*, defaults to 0):
The tokenizer index of the bos token.
eos_token_id (`int`, *optional*, defaults to 2):
The tokenizer index of the eos token.
conv_glu_dim (`int`, *optional*, defaults to 1):
The dimension of the output of the `Conv1dSubsampler` layer in which GLU is applied on. Though the original
Flashlight code uses the value of 2, here it's adapted to 1 due to transposition differences.
conv_dropout (`float`, *optional*, defaults to 0.3):
The probability of randomly dropping the `Conv1dSubsampler` layer during training.
num_conv_layers (`int`, *optional*, defaults to 1):
Number of convolution layers before applying transformer encoder layers.
conv_kernel (`Sequence[int]`, *optional*, defaults to `(7,)`):
The kernel size of the 1D convolution applied before transformer layers. `len(conv_kernel)` must be equal
to `num_conv_layers`.
conv_stride (`Sequence[int]`, *optional*, defaults to `(3,)`):
The stride length of the 1D convolution applied before transformer layers. `len(conv_stride)` must be equal
to `num_conv_layers`.
input_feat_per_channel (`int`, *optional*, defaults to 80):
Feature dimensions of the channels of the input to the Conv1D layer.
input_channels (`int`, *optional*, defaults to 1):
Number of input channels of the input to the Conv1D layer.
conv_channels (`list[int]`, *optional*):
Channel sizes of intermediate Conv1D layers.
ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
instance of [`MCTCTForCTC`].
ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
of [`MCTCTForCTC`].
Example:
```python
>>> from transformers import MCTCTConfig, MCTCTModel
>>> # Initializing a M-CTC-T mctct-large style configuration
>>> configuration = MCTCTConfig()
>>> # Initializing a model (with random weights) from the mctct-large style configuration
>>> model = MCTCTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-05, layerdrop=0.3, hidden_act='relu', initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction='sum', ctc_zero_infinity=False, **kwargs):
pass
| 2 | 1 | 67 | 2 | 64 | 1 | 2 | 1.21 | 1 | 3 | 0 | 0 | 1 | 26 | 1 | 33 | 158 | 12 | 66 | 58 | 35 | 80 | 32 | 29 | 30 | 2 | 2 | 1 | 2
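`MCTCTConfig.__init__` above ends by checking that `conv_kernel` lists one kernel size per convolution layer. A small sketch that reproduces the same consistency rule in plain Python (the helper name `check_conv_config` is hypothetical, not part of transformers):

```python
def check_conv_config(conv_kernel=(7,), conv_stride=(3,), num_conv_layers=1):
    # Mirror of the validation at the end of MCTCTConfig.__init__:
    # one kernel size (and stride) is expected per convolution layer.
    conv_kernel, conv_stride = list(conv_kernel), list(conv_stride)
    if len(conv_kernel) != num_conv_layers:
        raise ValueError(
            f"`len(conv_kernel) = {len(conv_kernel)}` must equal `num_conv_layers = {num_conv_layers}`"
        )
    return conv_kernel, conv_stride

print(check_conv_config())                # defaults are consistent: ([7], [3])
# check_conv_config(conv_kernel=(7, 3))   # would raise ValueError (2 kernels, 1 layer)
```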
| 1,790 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/feature_extraction_mctct.py | transformers.models.deprecated.mctct.feature_extraction_mctct.MCTCTFeatureExtractor |
from typing import Optional, Union
from ....file_utils import PaddingStrategy, TensorType
from ....feature_extraction_utils import BatchFeature
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
import numpy as np
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
"""
Constructs a M-CTC-T feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods. This
code has been adapted from Flashlight's C++ code. For more information about the implementation, one can refer to
this [notebook](https://colab.research.google.com/drive/1GLtINkkhzms-IsdcGy_-tVCkv0qNF-Gt#scrollTo=pMCRGMmUC_an)
that takes the user step-by-step in the implementation.
Args:
feature_size (`int`, defaults to 80):
The feature dimension of the extracted features. This is the number of mel-frequency bins.
sampling_rate (`int`, defaults to 16000):
The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
padding_value (`float`, defaults to 0.0):
The value that is used to fill the padding values.
hop_length (`int`, defaults to 10):
Number of audio samples between windows. Otherwise referred to as "shift" in many papers.
win_length (`int`, defaults to 25):
Number of ms per window
win_function (`str`, defaults to `"hamming_window"`):
Name for the window function used for windowing, must be accessible via `torch.{win_function}`
frame_signal_scale (`float`, defaults to 32768.0):
Constant multiplied in creating the frames before applying DFT.
preemphasis_coeff (`float`, defaults to 0.97):
Constant multiplied in applying Pre-emphasis before DFT.
mel_floor (`float` defaults to 1.0):
Minimum value of mel frequency banks.
normalize_means (`bool`, *optional*, defaults to `True`):
Whether or not to zero-mean normalize the extracted features.
normalize_vars (`bool`, *optional*, defaults to `True`):
Whether or not to unit-variance normalize the extracted features.
"""
model_input_names = ['input_features', 'attention_mask']
def __init__(self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25, win_function='hamming_window', frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs):
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
self.feature_size = feature_size
self.sampling_rate = sampling_rate
self.padding_value = padding_value
self.hop_length = hop_length
self.win_length = win_length
self.frame_signal_scale = frame_signal_scale
self.preemphasis_coeff = preemphasis_coeff
self.mel_floor = mel_floor
self.normalize_means = normalize_means
self.normalize_vars = normalize_vars
self.win_function = win_function
self.return_attention_mask = return_attention_mask
self.sample_size = win_length * sampling_rate // 1000
self.sample_stride = hop_length * sampling_rate // 1000
self.n_fft = optimal_fft_length(self.sample_size)
self.n_freqs = self.n_fft // 2 + 1
def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
"""
Extracts MFSC Features for one waveform vector (unbatched). Adapted from Flashlight's C++ MFSC code.
"""
if self.win_function == 'hamming_window':
window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
else:
window = window_function(window_length=self.sample_size, name=self.win_function)
fbanks = mel_filter_bank(num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate)
msfc_features = spectrogram(one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel='log')
return msfc_features.T
def _normalize_one(self, x, input_length, padding_value):
if self.normalize_means:
mean = x[:input_length].mean(axis=0)
x = np.subtract(x, mean)
if self.normalize_vars:
std = x[:input_length].std(axis=0)
x = np.divide(x, std)
if input_length < x.shape[0]:
x[input_length:] = padding_value
x = x.astype(np.float32)
return x
def normalize(self, input_features: list[np.ndarray], attention_mask: Optional[np.ndarray]=None) -> list[np.ndarray]:
lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
def __call__(self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], padding: Union[bool, str, PaddingStrategy]=False, max_length: Optional[int]=None, truncation: bool=False, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, sampling_rate: Optional[int]=None, **kwargs) -> BatchFeature:
"""
Main method to featurize and prepare for the model one or several sequence(s). It returns the
log-mel spectrogram of the input audio, as implemented in the original Flashlight MFSC feature extraction code.
Args:
raw_speech (`torch.Tensor`, `np.ndarray`, `list[float]`, `list[torch.Tensor]`, `list[np.ndarray]`, `list[list[float]]`):
The sequence or batch of sequences to be padded. Each sequence can be a tensor, a numpy array, a list
of float values, a list of tensors, a list of numpy arrays or a list of list of float values. Must be
mono channel audio, not stereo, i.e. single float per timestep.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`):
Activates truncation to cut input sequences longer than *max_length* to *max_length*.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
padding_value (`float`, defaults to 0.0):
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning('It is strongly recommended to pass the ``sampling_rate`` argument to this function. Failing to do so can result in silent errors that might be hard to debug.')
is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}')
is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], (np.ndarray, tuple, list)))
if is_batched:
raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
elif not is_batched and (not isinstance(raw_speech, np.ndarray)):
raw_speech = np.asarray(raw_speech, dtype=np.float32)
elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
raw_speech = raw_speech.astype(np.float32)
if not is_batched:
raw_speech = [raw_speech]
features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
encoded_inputs = BatchFeature({'input_features': features})
padded_inputs = self.pad(encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs)
input_features = padded_inputs.get('input_features')
if isinstance(input_features[0], list):
padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
attention_mask = padded_inputs.get('attention_mask')
if attention_mask is not None:
padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
attention_mask = np.array(attention_mask, dtype=np.int32) if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD and padding else None
padded_inputs['input_features'] = self.normalize(padded_inputs['input_features'], attention_mask=attention_mask)
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
|
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
'''
Constructs a M-CTC-T feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods. This
code has been adapted from Flashlight's C++ code. For more information about the implementation, one can refer to
this [notebook](https://colab.research.google.com/drive/1GLtINkkhzms-IsdcGy_-tVCkv0qNF-Gt#scrollTo=pMCRGMmUC_an)
that takes the user step-by-step in the implementation.
Args:
feature_size (`int`, defaults to 80):
The feature dimension of the extracted features. This is the number of mel-frequency bins.
sampling_rate (`int`, defaults to 16000):
The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
padding_value (`float`, defaults to 0.0):
The value that is used to fill the padding values.
hop_length (`int`, defaults to 10):
Number of audio samples between windows. Otherwise referred to as "shift" in many papers.
win_length (`int`, defaults to 25):
Number of ms per window
win_function (`str`, defaults to `"hamming_window"`):
Name for the window function used for windowing, must be accessible via `torch.{win_function}`
frame_signal_scale (`float`, defaults to 32768.0):
Constant multiplied in creating the frames before applying DFT.
preemphasis_coeff (`float`, defaults to 0.97):
Constant multiplied in applying Pre-emphasis before DFT.
mel_floor (`float` defaults to 1.0):
Minimum value of mel frequency banks.
normalize_means (`bool`, *optional*, defaults to `True`):
Whether or not to zero-mean normalize the extracted features.
normalize_vars (`bool`, *optional*, defaults to `True`):
Whether or not to unit-variance normalize the extracted features.
'''
def __init__(self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25, win_function='hamming_window', frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs):
pass
def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
'''
Extracts MFSC Features for one waveform vector (unbatched). Adapted from Flashlight's C++ MFSC code.
'''
pass
def _normalize_one(self, x, input_length, padding_value):
pass
def normalize(self, input_features: list[np.ndarray], attention_mask: Optional[np.ndarray]=None) -> list[np.ndarray]:
pass
def __call__(self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], padding: Union[bool, str, PaddingStrategy]=False, max_length: Optional[int]=None, truncation: bool=False, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, sampling_rate: Optional[int]=None, **kwargs) -> BatchFeature:
'''
Main method to featurize and prepare for the model one or several sequence(s). It returns the
log-mel spectrogram of the input audio, as implemented in the original Flashlight MFSC feature extraction code.
Args:
raw_speech (`torch.Tensor`, `np.ndarray`, `list[float]`, `list[torch.Tensor]`, `list[np.ndarray]`, `list[list[float]]`):
The sequence or batch of sequences to be padded. Each sequence can be a tensor, a numpy array, a list
of float values, a list of tensors, a list of numpy arrays or a list of list of float values. Must be
mono channel audio, not stereo, i.e. single float per timestep.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`):
Activates truncation to cut input sequences longer than *max_length* to *max_length*.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
padding_value (`float`, defaults to 0.0):
'''
pass
| 6 | 3 | 43 | 5 | 28 | 10 | 4 | 0.55 | 1 | 10 | 1 | 0 | 5 | 16 | 5 | 22 | 256 | 33 | 144 | 64 | 110 | 79 | 71 | 36 | 65 | 13 | 3 | 2 | 22
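The last four attributes set in `MCTCTFeatureExtractor.__init__` above follow from the millisecond window/hop settings and the sampling rate; `optimal_fft_length` picks an FFT size at least as large as the window (assumed here to be the next power of two, which is what it yields for these defaults). A quick sketch with the default values:

```python
import math

sampling_rate, win_length_ms, hop_length_ms = 16000, 25, 10

sample_size = win_length_ms * sampling_rate // 1000    # 400 samples per analysis window
sample_stride = hop_length_ms * sampling_rate // 1000  # 160 samples between windows
n_fft = 2 ** math.ceil(math.log2(sample_size))         # 512, next power of two >= 400
n_freqs = n_fft // 2 + 1                                # 257 frequency bins

print(sample_size, sample_stride, n_fft, n_freqs)       # 400 160 512 257
```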
| 1,791 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py | transformers.models.deprecated.mctct.modeling_mctct.MCTCTAttention |
from torch import nn
from ....pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
class MCTCTAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = MCTCTSelfAttention(config)
self.output = MCTCTSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
|
class MCTCTAttention(nn.Module):
def __init__(self, config):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
pass
| 4 | 0 | 13 | 1 | 11 | 1 | 1 | 0.09 | 1 | 4 | 2 | 0 | 3 | 3 | 3 | 13 | 42 | 5 | 35 | 17 | 25 | 3 | 22 | 11 | 18 | 2 | 1 | 1 | 4
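After `prune_heads` above removes heads, the module shrinks `num_attention_heads` and recomputes `all_head_size` so the remaining projections stay consistent. The arithmetic, using the MCTCT defaults (4 heads of dimension 384) and a hypothetical set of pruned heads:

```python
# Bookkeeping mirrored from MCTCTAttention.prune_heads (sizes taken from the default config).
num_attention_heads, attention_head_size = 4, 384
pruned_heads = {1, 3}                                   # hypothetical heads to drop
num_attention_heads -= len(pruned_heads)
all_head_size = num_attention_heads * attention_head_size
print(num_attention_heads, all_head_size)               # 2 768
```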
| 1,792 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py | transformers.models.deprecated.mctct.modeling_mctct.MCTCTConv1dSubsampler |
from torch import nn
import torch
class MCTCTConv1dSubsampler(nn.Module):
"""
Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
via gated linear units (https://huggingface.co/papers/1911.08460)
"""
def __init__(self, config):
super().__init__()
self.config = config
self.glu_dim = config.conv_glu_dim
self.dropout = nn.Dropout(config.conv_dropout)
self.num_layers = config.num_conv_layers
self.in_channels = config.input_feat_per_channel * config.input_channels
if self.num_layers > 1:
if config.conv_channels is None:
raise ValueError('Need to specify `conv_channels` configuration in `MCTCTConfig` to use multiple convolution layers.')
self.mid_channels = config.conv_channels
else:
self.mid_channels = None
self.out_channels = config.hidden_size * 2
self.kernel_size = config.conv_kernel
self.stride = config.conv_stride
self.conv_layers = nn.ModuleList((nn.Conv1d(self.in_channels if i == 0 else self.mid_channels[i], self.mid_channels[i] if i < self.num_layers - 1 else self.out_channels, kernel_size=k, stride=self.stride[i], padding='valid') for i, k in enumerate(self.kernel_size)))
def forward(self, input_features):
padding = sum([size // 2 for size in self.kernel_size])
input_features = torch.nn.functional.pad(input_features, (0, 0, padding, padding), 'constant', 0)
hidden_states = input_features.transpose(1, 2).contiguous()
for conv in self.conv_layers:
hidden_states = conv(hidden_states)
hidden_states = nn.functional.glu(hidden_states, dim=self.glu_dim)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states.transpose(1, 2).contiguous()
return hidden_states
|
class MCTCTConv1dSubsampler(nn.Module):
'''
Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
via gated linear units (https://huggingface.co/papers/1911.08460)
'''
def __init__(self, config):
pass
def forward(self, input_features):
pass
| 3 | 1 | 26 | 4 | 20 | 5 | 4 | 0.33 | 1 | 3 | 0 | 0 | 2 | 10 | 2 | 12 | 59 | 10 | 40 | 16 | 37 | 13 | 27 | 16 | 24 | 5 | 1 | 2 | 7
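With the default configuration, the forward pass above pads the time axis by `kernel_size // 2` on each side and then applies a single 'valid' convolution with kernel 7 and stride 3, so the sequence length shrinks by roughly a factor of three. A sketch of the resulting length arithmetic:

```python
def subsampled_length(seq_len, kernel_size=7, stride=3):
    # Symmetric padding of kernel_size // 2, then a 'valid' 1D convolution.
    padded = seq_len + 2 * (kernel_size // 2)
    return (padded - kernel_size) // stride + 1

for seq_len in (100, 920):
    print(seq_len, "->", subsampled_length(seq_len))    # 100 -> 34, 920 -> 307
```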
| 1,793 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py | transformers.models.deprecated.mctct.modeling_mctct.MCTCTEmbeddings |
from torch import nn
import torch
class MCTCTEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = MCTCTLayerNorm()
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False)
def forward(self, input_features=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):
input_shape = input_features.size() if input_features is not None else inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length]
if token_type_ids is None:
if hasattr(self, 'token_type_ids'):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_features)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
|
class MCTCTEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_features=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0):
pass
| 3 | 1 | 26 | 5 | 18 | 4 | 4 | 0.22 | 1 | 2 | 1 | 0 | 2 | 5 | 2 | 12 | 56 | 11 | 37 | 16 | 32 | 8 | 28 | 14 | 25 | 6 | 1 | 2 | 7
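The two `register_buffer` calls above precompute `position_ids` and `token_type_ids` once so that the forward pass only has to slice and expand them. A minimal sketch of that pattern, assuming `torch` is available (the module name `BufferedIds` is illustrative):

```python
import torch
from torch import nn

class BufferedIds(nn.Module):
    def __init__(self, max_position_embeddings=920):
        super().__init__()
        # Precomputed once; sliced to the actual sequence length in forward().
        self.register_buffer("position_ids", torch.arange(max_position_embeddings).expand((1, -1)), persistent=False)
        self.register_buffer("token_type_ids", torch.zeros_like(self.position_ids), persistent=False)

    def forward(self, batch_size, seq_length):
        position_ids = self.position_ids[:, :seq_length]
        token_type_ids = self.token_type_ids[:, :seq_length].expand(batch_size, seq_length)
        return position_ids, token_type_ids

pos, tok = BufferedIds()(batch_size=2, seq_length=6)
print(pos.shape, tok.shape)   # torch.Size([1, 6]) torch.Size([2, 6])
```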
| 1,794 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py | transformers.models.deprecated.mctct.modeling_mctct.MCTCTEncoder |
from ....integrations.fsdp import is_fsdp_managed_module
from ....modeling_outputs import BaseModelOutput, CausalLMOutput
from ....modeling_attn_mask_utils import _prepare_4d_attention_mask
from typing import Optional, Union
import torch
from ....integrations.deepspeed import is_deepspeed_zero3_enabled
from torch import nn
from .configuration_mctct import MCTCTConfig
class MCTCTEncoder(MCTCTPreTrainedModel):
def __init__(self, config: MCTCTConfig):
super().__init__(config)
self.hidden_dropout_prob = config.hidden_dropout_prob
self.layer_norm = MCTCTLayerNorm()
self.conv = MCTCTConv1dSubsampler(config)
self.layers = nn.ModuleList([MCTCTLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, input_features: torch.Tensor, attention_mask: torch.Tensor, head_mask: torch.Tensor, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
input_features = self.layer_norm(input_features)
inputs_embeds = self.conv(input_features)
if attention_mask is not None:
attention_mask = self._get_feature_vector_attention_mask(inputs_embeds.shape[1], attention_mask)
hidden_states = nn.functional.dropout(inputs_embeds, p=self.hidden_dropout_prob, training=self.training)
if attention_mask is not None:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
if head_mask is not None:
if head_mask.size()[0] != len(self.layers):
raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.config.layerdrop
if not skip_the_layer or synced_gpus:
layer_outputs = encoder_layer(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
class MCTCTEncoder(MCTCTPreTrainedModel):
def __init__(self, config: MCTCTConfig):
pass
def forward(self, input_features: torch.Tensor, attention_mask: torch.Tensor, head_mask: torch.Tensor, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]:
pass
| 3 | 0 | 46 | 9 | 35 | 3 | 11 | 0.09 | 1 | 12 | 5 | 0 | 2 | 5 | 2 | 134 | 94 | 18 | 70 | 25 | 59 | 6 | 44 | 17 | 41 | 20 | 3 | 3 | 21
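The encoder loop above implements LayerDrop: during training each layer is skipped with probability `config.layerdrop` (0.3 by default), except when execution must stay synchronized across processes. A minimal sketch of the decision rule, assuming `torch` is available:

```python
import torch

layerdrop, training, synced_gpus = 0.3, True, False
torch.manual_seed(0)

for idx in range(4):                                     # pretend there are 4 encoder layers
    dropout_probability = torch.rand([])
    skip_the_layer = training and dropout_probability < layerdrop
    run_layer = (not skip_the_layer) or synced_gpus
    print(f"layer {idx}: run={bool(run_layer)}")
```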
| 1,795 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py | transformers.models.deprecated.mctct.modeling_mctct.MCTCTForCTC |
from torch import nn
import torch
from ....file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ....modeling_outputs import BaseModelOutput, CausalLMOutput
from typing import Optional, Union
@add_start_docstrings('MCTCT Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).', MCTCT_START_DOCSTRING)
class MCTCTForCTC(MCTCTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.mctct = MCTCTModel(config)
if config.vocab_size is None:
raise ValueError(f"You are trying to instantiate {self.__class__} with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `MCTCTForCTC.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your model's configuration.")
output_hidden_size = config.hidden_size
self.ctc_head = nn.Linear(output_hidden_size, config.vocab_size)
self.post_init()
@add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS)
def forward(self, input_features: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size - 1]`.
"""
if labels is not None and labels.max() >= self.config.vocab_size:
raise ValueError(f'Label values must be <= vocab_size: {self.config.vocab_size}')
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mctct(input_features, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = outputs[0]
logits = self.ctc_head(hidden_states)
loss = None
if labels is not None:
attention_mask = attention_mask if attention_mask is not None else torch.ones(input_features.shape[:-1], dtype=torch.long)
input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)
flattened_targets = labels.masked_select(labels_mask)
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
with torch.backends.cudnn.flags(enabled=False):
loss = nn.functional.ctc_loss(log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity)
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return (loss,) + output if loss is not None else output
return CausalLMOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@add_start_docstrings('MCTCT Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).', MCTCT_START_DOCSTRING)
class MCTCTForCTC(MCTCTPreTrainedModel):
def __init__(self, config):
pass
@add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS)
def forward(self, input_features: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size - 1]`.
'''
pass
| 6 | 1 | 44 | 6 | 32 | 6 | 5 | 0.16 | 1 | 6 | 2 | 0 | 2 | 2 | 2 | 134 | 98 | 13 | 73 | 26 | 53 | 12 | 30 | 16 | 27 | 7 | 3 | 2 | 9
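The loss branch in `MCTCTForCTC.forward` above masks padded labels (`-100`), flattens the remaining targets, and feeds time-major log-probabilities to `torch.nn.functional.ctc_loss`. A stripped-down sketch with random tensors (all shapes and values are illustrative only):

```python
import torch
from torch import nn

batch, time, vocab_size, pad_token_id = 2, 50, 32, 1
logits = torch.randn(batch, time, vocab_size)                  # stand-in for the CTC head output
labels = torch.tensor([[5, 9, 12, -100, -100], [3, 4, 7, 8, 2]])

input_lengths = torch.full((batch,), time, dtype=torch.long)   # every frame is valid here
labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)
flattened_targets = labels.masked_select(labels_mask)

log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
loss = nn.functional.ctc_loss(
    log_probs, flattened_targets, input_lengths, target_lengths,
    blank=pad_token_id, reduction="sum", zero_infinity=False,
)
print(loss)
```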
| 1,796 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py | transformers.models.deprecated.mctct.modeling_mctct.MCTCTIntermediate |
from torch import nn
from ....activations import ACT2FN
class MCTCTIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class MCTCTIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3
| 1,797 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py | transformers.models.deprecated.mctct.modeling_mctct.MCTCTLayer |
from .configuration_mctct import MCTCTConfig
from ....modeling_layers import GradientCheckpointingLayer
from ....pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
class MCTCTLayer(GradientCheckpointingLayer):
def __init__(self, config: MCTCTConfig):
super().__init__()
self.seq_len_dim = 1
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.intermediate = MCTCTIntermediate(config)
self.attention = MCTCTAttention(config)
self.is_decoder = config.is_decoder
self.output = MCTCTOutput(config)
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
class MCTCTLayer(GradientCheckpointingLayer):
def __init__(self, config: MCTCTConfig):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
pass
def feed_forward_chunk(self, attention_output):
pass
| 4 | 0 | 11 | 2 | 10 | 0 | 1 | 0.03 | 1 | 5 | 4 | 0 | 3 | 6 | 3 | 13 | 37 | 7 | 30 | 22 | 20 | 1 | 20 | 16 | 16 | 1 | 1 | 0 | 3
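`apply_chunking_to_forward` above splits the sequence dimension into chunks of `config.chunk_size_feed_forward` and runs `feed_forward_chunk` on each slice, trading a little speed for lower peak memory. A simplified stand-in for that helper (not the transformers implementation), assuming `torch` is available:

```python
import torch

def feed_forward(x):
    # Stand-in for intermediate + output: any position-wise function works.
    return torch.relu(x) * 2.0

def chunked_forward(forward_fn, chunk_size, seq_len_dim, x):
    if chunk_size == 0:                                   # chunking disabled
        return forward_fn(x)
    chunks = x.split(chunk_size, dim=seq_len_dim)
    return torch.cat([forward_fn(chunk) for chunk in chunks], dim=seq_len_dim)

hidden = torch.randn(2, 10, 8)                            # (batch, seq_len, hidden)
out = chunked_forward(feed_forward, chunk_size=4, seq_len_dim=1, x=hidden)
print(torch.allclose(out, feed_forward(hidden)))          # True: same result, smaller intermediates
```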
| 1,798 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py | transformers.models.deprecated.mctct.modeling_mctct.MCTCTLayerNorm |
import torch
from torch import nn
class MCTCTLayerNorm(nn.Module):
def __init__(self):
super().__init__()
self.singleton_weight = nn.Parameter(torch.ones(1))
self.singleton_bias = nn.Parameter(torch.zeros(1))
def forward(self, hidden_states):
return hidden_states * self.singleton_weight + self.singleton_bias
|
class MCTCTLayerNorm(nn.Module):
def __init__(self):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 2 | 2 | 12 | 8 | 1 | 7 | 5 | 4 | 0 | 7 | 5 | 4 | 1 | 1 | 0 | 2
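Despite its name, `MCTCTLayerNorm` above performs no normalization: it applies a single learnable scale and bias to the whole tensor. A quick sketch of the behaviour, assuming `torch` is available (the class name `SingletonScaleBias` is illustrative):

```python
import torch
from torch import nn

class SingletonScaleBias(nn.Module):
    # Mirrors MCTCTLayerNorm: one scalar weight and one scalar bias, no statistics.
    def __init__(self):
        super().__init__()
        self.singleton_weight = nn.Parameter(torch.ones(1))
        self.singleton_bias = nn.Parameter(torch.zeros(1))

    def forward(self, hidden_states):
        return hidden_states * self.singleton_weight + self.singleton_bias

layer = SingletonScaleBias()
x = torch.randn(2, 3)
print(torch.equal(layer(x), x))   # True at initialization (weight=1, bias=0)
```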
| 1,799 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/deprecated/mctct/modeling_mctct.py | transformers.models.deprecated.mctct.modeling_mctct.MCTCTModel |
import torch
from typing import Optional, Union
from ....file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ....modeling_outputs import BaseModelOutput, CausalLMOutput
@add_start_docstrings('The bare M-CTC-T Model transformer outputting raw hidden-states without any specific head on top.', MCTCT_START_DOCSTRING)
class MCTCTModel(MCTCTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.encoder = MCTCTEncoder(config)
self.post_init()
@add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality='audio', expected_output=_EXPECTED_OUTPUT_SHAPE)
def forward(self, input_features: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_features is None:
raise ValueError('You have to specify input_features.')
encoder_outputs = self.encoder(input_features, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@add_start_docstrings('The bare M-CTC-T Model transformer outputting raw hidden-states without any specific head on top.', MCTCT_START_DOCSTRING)
class MCTCTModel(MCTCTPreTrainedModel):
def __init__(self, config):
pass
@add_start_docstrings_to_model_forward(MCTCT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality='audio', expected_output=_EXPECTED_OUTPUT_SHAPE)
def forward(self, input_features: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
pass
| 6 | 0 | 22 | 3 | 19 | 1 | 4 | 0.02 | 1 | 6 | 2 | 0 | 2 | 2 | 2 | 134 | 54 | 7 | 46 | 16 | 27 | 1 | 17 | 7 | 14 | 6 | 3 | 1 | 7
|