Column schema (dtype and observed min / max; ⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length), nullable ⌀ | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
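
The rows below can be consumed programmatically. A minimal sketch, assuming the dump comes from a Hugging Face dataset; the dataset id `org/class-metrics` is a placeholder, not the real repository name:

```python
from datasets import load_dataset

# Hypothetical dataset id; substitute the actual repository this dump was rendered from.
ds = load_dataset("org/class-metrics", split="train")
row = ds[0]
print(row["class_name"], row["SumCyclomatic"])  # class identifier plus one of the per-class complexity metrics
```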

id: 6,300 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yolos/modeling_yolos.py | class_name: transformers.models.yolos.modeling_yolos.YolosModel

human_written_code:
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from typing import Callable, Optional, Union
import torch
from .configuration_yolos import YolosConfig
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging
from ...utils.generic import can_return_tuple, check_model_inputs
from ...processing_utils import Unpack
from torch import nn
@auto_docstring
class YolosModel(YolosPreTrainedModel):
def __init__(self, config: YolosConfig, add_pooling_layer: bool=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = YolosEmbeddings(config)
self.encoder = YolosEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pooler = YolosPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self) -> YolosPatchEmbeddings:
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
"""
Prunes heads of the model.
Args:
heads_to_prune (`dict`):
See base class `PreTrainedModel`. The input dictionary must have the following format: {layer_num:
list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPooling:
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(pixel_values)
height, width = pixel_values.shape[-2:]
encoder_outputs: BaseModelOutput = self.encoder(embedding_output, height=height, width=width, head_mask=head_mask)
sequence_output = encoder_outputs.last_hidden_state
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)

class_skeleton:
@auto_docstring
class YolosModel(YolosPreTrainedModel):
def __init__(self, config: YolosConfig, add_pooling_layer: bool=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self) -> YolosPatchEmbeddings:
pass
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
'''
Prunes heads of the model.
Args:
heads_to_prune (`dict`):
See base class `PreTrainedModel`. The input dictionary must have the following format: {layer_num:
list of heads to prune in this layer}
'''
pass
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPooling:
pass

metrics: total_program_units=8, total_doc_str=2, AvgCountLine=19, AvgCountLineBlank=3, AvgCountLineCode=13, AvgCountLineComment=3, AvgCyclomatic=3, CommentToCodeRatio=0.22, CountClassBase=1, CountClassCoupled=11, CountClassCoupledModified=6, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=5, CountDeclMethod=4, CountDeclMethodAll=5, CountLine=86, CountLineBlank=13, CountLineCode=60, CountLineCodeDecl=24, CountLineCodeExe=40, CountLineComment=13, CountStmt=30, CountStmtDecl=16, CountStmtExe=25, MaxCyclomatic=8, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=13
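
The `_prune_heads` docstring above specifies the `{layer_num: [head indices]}` format. A hedged usage sketch; the checkpoint name is illustrative:

```python
from transformers import YolosModel

model = YolosModel.from_pretrained("hustvl/yolos-small")  # illustrative checkpoint
# PreTrainedModel.prune_heads forwards this dict to the _prune_heads hook shown above:
# drop heads 0 and 2 in layer 0, and head 1 in layer 5.
model.prune_heads({0: [0, 2], 5: [1]})
```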

id: 6,301 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yolos/modeling_yolos.py | class_name: transformers.models.yolos.modeling_yolos.YolosObjectDetectionOutput

human_written_code:
import torch
from typing import Callable, Optional, Union
from dataclasses import dataclass
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging
@dataclass
@auto_docstring(custom_intro='\n Output type of [`YolosForObjectDetection`].\n ')
class YolosObjectDetectionOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~YolosImageProcessor.post_process`] to retrieve the unnormalized bounding
boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
auxiliary_outputs: Optional[list[dict]] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None

class_skeleton:
@dataclass
@auto_docstring(custom_intro='\n Output type of [`YolosForObjectDetection`].\n ')
class YolosObjectDetectionOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~YolosImageProcessor.post_process`] to retrieve the unnormalized bounding
boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
'''
pass

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=3.44, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=42, CountLineBlank=2, CountLineCode=9, CountLineCodeDecl=9, CountLineCodeExe=8, CountLineComment=31, CountStmt=9, CountStmtDecl=9, CountStmtExe=8, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
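
The `pred_boxes` field above stores `(center_x, center_y, width, height)` normalized to `[0, 1]`, and the docstring points to `~YolosImageProcessor.post_process` for un-normalization. A minimal sketch of the underlying conversion to absolute corner coordinates, written from the documented box format rather than the library's implementation:

```python
import torch

def to_corner_pixels(pred_boxes: torch.Tensor, height: int, width: int) -> torch.Tensor:
    # (center_x, center_y, w, h) in [0, 1] -> (x0, y0, x1, y1) in pixels.
    cx, cy, w, h = pred_boxes.unbind(-1)
    corners = torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)
    return corners * torch.tensor([width, height, width, height], dtype=corners.dtype)
```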

id: 6,302 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yolos/modeling_yolos.py | class_name: transformers.models.yolos.modeling_yolos.YolosOutput

human_written_code:
from .configuration_yolos import YolosConfig
import torch
from torch import nn
class YolosOutput(nn.Module):
def __init__(self, config: YolosConfig):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states

class_skeleton:
class YolosOutput(nn.Module):
def __init__(self, config: YolosConfig):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=1, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=3, CountLineCode=10, CountLineCodeDecl=5, CountLineCodeExe=7, CountLineComment=0, CountStmt=10, CountStmtDecl=5, CountStmtExe=7, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2

id: 6,303 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yolos/modeling_yolos.py | class_name: transformers.models.yolos.modeling_yolos.YolosPatchEmbeddings

human_written_code:
import collections.abc
import torch
from torch import nn
class YolosPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = (config.image_size, config.patch_size)
num_channels, hidden_size = (config.num_channels, config.hidden_size)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError('Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
return embeddings

class_skeleton:
class YolosPatchEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config):
pass
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
pass

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=12, AvgCountLineBlank=2, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0.24, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=31, CountLineBlank=5, CountLineCode=21, CountLineCodeDecl=13, CountLineCodeExe=18, CountLineComment=5, CountStmt=19, CountStmtDecl=13, CountStmtExe=16, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=5
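
A worked instance of the `num_patches` arithmetic in `YolosPatchEmbeddings.__init__` above; the 224-pixel image and 16-pixel patch sizes are illustrative, not taken from this dump:

```python
image_size, patch_size = (224, 224), (16, 16)
# Same expression as in __init__: columns of patches times rows of patches.
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
assert num_patches == 14 * 14 == 196  # 196 patch tokens feed the Transformer
```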

id: 6,304 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yolos/modeling_yolos.py | class_name: transformers.models.yolos.modeling_yolos.YolosPooler

human_written_code:
import torch
from torch import nn
from .configuration_yolos import YolosConfig
class YolosPooler(nn.Module):
def __init__(self, config: YolosConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output

class_skeleton:
class YolosPooler(nn.Module):
def __init__(self, config: YolosConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.2, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=1, CountLineCode=10, CountLineCodeDecl=7, CountLineCodeExe=7, CountLineComment=2, CountStmt=10, CountStmtDecl=7, CountStmtExe=7, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2

id: 6,305 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yolos/modeling_yolos.py | class_name: transformers.models.yolos.modeling_yolos.YolosPreTrainedModel

human_written_code:
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_yolos import YolosConfig
from typing import Callable, Optional, Union
@auto_docstring
class YolosPreTrainedModel(PreTrainedModel):
config: YolosConfig
base_model_prefix = 'vit'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = []
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': YolosLayer, 'attentions': YolosSelfAttention}
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)

class_skeleton:
@auto_docstring
class YolosPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
'''Initialize the weights'''
pass

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=11, AvgCountLineBlank=0, AvgCountLineCode=8, AvgCountLineComment=3, AvgCyclomatic=4, CommentToCodeRatio=0.47, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=2, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=24, CountLineBlank=2, CountLineCode=15, CountLineCodeDecl=8, CountLineCodeExe=13, CountLineComment=7, CountStmt=14, CountStmtDecl=8, CountStmtExe=12, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=4

id: 6,306 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yolos/modeling_yolos.py | class_name: transformers.models.yolos.modeling_yolos.YolosSelfAttention

human_written_code:
from .configuration_yolos import YolosConfig
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
from typing import Callable, Optional, Union
import torch
class YolosSelfAttention(nn.Module):
def __init__(self, config: YolosConfig):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.')
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout_prob = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size ** (-0.5)
self.is_causal = False
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor]:
batch_size = hidden_states.shape[0]
new_shape = (batch_size, -1, self.num_attention_heads, self.attention_head_size)
key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(self, query_layer, key_layer, value_layer, head_mask, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
return (context_layer, attention_probs)

class_skeleton:
class YolosSelfAttention(nn.Module):
def __init__(self, config: YolosConfig):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor]:
pass

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=18, AvgCountLineBlank=4, AvgCountLineCode=12, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.13, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=1, CountClassDerived=1, CountDeclInstanceMethod=3, CountDeclInstanceVariable=7, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=58, CountLineBlank=15, CountLineCode=38, CountLineCodeDecl=23, CountLineCodeExe=32, CountLineComment=5, CountStmt=33, CountStmtDecl=21, CountStmtExe=29, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=6
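
`YolosSelfAttention.forward` above dispatches to `eager_attention_forward` (not included in this extract) unless another attention backend is configured. A minimal sketch of what such an eager path computes under the scaling and head-mask semantics shown; this is an assumption-based restatement, not the library's exact function:

```python
import torch

def eager_attention(query, key, value, head_mask=None, *, scaling, dropout=0.0):
    # query/key/value: (batch, num_heads, seq_len, head_dim)
    scores = torch.matmul(query, key.transpose(-1, -2)) * scaling
    probs = torch.nn.functional.softmax(scores, dim=-1)
    probs = torch.nn.functional.dropout(probs, p=dropout)
    if head_mask is not None:
        probs = probs * head_mask  # zero out pruned/masked heads
    context = torch.matmul(probs, value)
    # Return (batch, seq_len, num_heads, head_dim) so the caller's reshape to
    # (batch, seq_len, all_head_size) works as written above.
    return context.transpose(1, 2).contiguous(), probs
```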

id: 6,307 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yolos/modeling_yolos.py | class_name: transformers.models.yolos.modeling_yolos.YolosSelfOutput

human_written_code:
from torch import nn
from .configuration_yolos import YolosConfig
import torch
class YolosSelfOutput(nn.Module):
"""
The residual connection is defined in YolosLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: YolosConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states

class_skeleton:
class YolosSelfOutput(nn.Module):
'''
The residual connection is defined in YolosLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
'''
def __init__(self, config: YolosConfig):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=5, AvgCountLineBlank=1, AvgCountLineCode=4, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.44, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=16, CountLineBlank=3, CountLineCode=9, CountLineCodeDecl=5, CountLineCodeExe=6, CountLineComment=4, CountStmt=9, CountStmtDecl=5, CountStmtExe=6, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2

id: 6,308 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/configuration_yoso.py | class_name: transformers.models.yoso.configuration_yoso.YosoConfig

human_written_code:
from ...configuration_utils import PretrainedConfig
class YosoConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`YosoModel`]. It is used to instantiate a YOSO
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the YOSO
[uw-madison/yoso-4096](https://huggingface.co/uw-madison/yoso-4096) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the YOSO model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`YosoModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 1):
The vocabulary size of the `token_type_ids` passed when calling [`YosoModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`.
use_expectation (`bool`, *optional*, defaults to `True`):
Whether or not to use YOSO Expectation. Overrides any effect of num_hash.
hash_code_len (`int`, *optional*, defaults to 9):
The length of hashes generated by the hash functions.
num_hash (`int`, *optional*, defaults to 64):
Number of hash functions used in [`YosoSelfAttention`].
conv_window (`int`, *optional*):
Kernel size of depth-wise convolution.
use_fast_hash (`bool`, *optional*, defaults to `True`):
Whether or not to use custom CUDA kernels which perform fast random projection via Hadamard transform.
lsh_backward (`bool`, *optional*, defaults to `True`):
Whether or not to perform backpropagation using Locality Sensitive Hashing.
Example:
```python
>>> from transformers import YosoConfig, YosoModel
>>> # Initializing a YOSO uw-madison/yoso-4096 style configuration
>>> configuration = YosoConfig()
>>> # Initializing a model (with random weights) from the uw-madison/yoso-4096 style configuration
>>> model = YosoModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'yoso'
def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type='absolute', use_expectation=True, hash_code_len=9, num_hash=64, conv_window=None, use_fast_hash=True, lsh_backward=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_expectation = use_expectation
self.hash_code_len = hash_code_len
self.num_hash = num_hash
self.conv_window = conv_window
self.use_fast_hash = use_fast_hash
self.lsh_backward = lsh_backward

class_skeleton:
class YosoConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`YosoModel`]. It is used to instantiate a YOSO
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the YOSO
[uw-madison/yoso-4096](https://huggingface.co/uw-madison/yoso-4096) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the YOSO model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`YosoModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 1):
The vocabulary size of the `token_type_ids` passed when calling [`YosoModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`.
use_expectation (`bool`, *optional*, defaults to `True`):
Whether or not to use YOSO Expectation. Overrides any effect of num_hash.
hash_code_len (`int`, *optional*, defaults to 9):
The length of hashes generated by the hash functions.
num_hash (`int`, *optional*, defaults to 64):
Number of hash functions used in [`YosoSelfAttention`].
conv_window (`int`, *optional*):
Kernel size of depth-wise convolution.
use_fast_hash (`bool`, *optional*, defaults to `True`):
Whether or not to use custom CUDA kernels which perform fast random projection via Hadamard transform.
lsh_backward (`bool`, *optional*, defaults to `True`):
Whether or not to perform backpropagation using Locality Sensitive Hashing.
Example:
```python
>>> from transformers import YosoConfig, YosoModel
>>> # Initializing a YOSO uw-madison/yoso-4096 style configuration
>>> configuration = YosoConfig()
>>> # Initializing a model (with random weights) from the uw-madison/yoso-4096 style configuration
>>> model = YosoModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type='absolute', use_expectation=True, hash_code_len=9, num_hash=64, conv_window=None, use_fast_hash=True, lsh_backward=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
pass

metrics: total_program_units=2, total_doc_str=1, AvgCountLine=47, AvgCountLineBlank=1, AvgCountLineCode=46, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=1.23, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=19, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=118, CountLineBlank=11, CountLineCode=48, CountLineCodeDecl=47, CountLineCodeExe=21, CountLineComment=59, CountStmt=23, CountStmtDecl=22, CountStmtExe=21, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=1

id: 6,309 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py | class_name: transformers.models.yoso.modeling_yoso.YosoAttention

human_written_code:
from torch import nn
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
class YosoAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
self.self = YosoSelfAttention(config, position_embedding_type=position_embedding_type)
self.output = YosoSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
self_outputs = self.self(hidden_states, attention_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs

class_skeleton:
class YosoAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
pass

metrics: total_program_units=4, total_doc_str=0, AvgCountLine=9, AvgCountLineBlank=1, AvgCountLineCode=8, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.13, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=3, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=30, CountLineBlank=4, CountLineCode=24, CountLineCodeDecl=11, CountLineCodeExe=20, CountLineComment=3, CountStmt=22, CountStmtDecl=11, CountStmtExe=18, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4

id: 6,310 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py | class_name: transformers.models.yoso.modeling_yoso.YosoClassificationHead

human_written_code:
from ...activations import ACT2FN
from torch import nn
class YosoClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
self.config = config
def forward(self, features, **kwargs):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = ACT2FN[self.config.hidden_act](x)
x = self.dropout(x)
x = self.out_proj(x)
return x

class_skeleton:
class YosoClassificationHead(nn.Module):
'''Head for sentence-level classification tasks.'''
def __init__(self, config):
pass
def forward(self, features, **kwargs):
pass

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=8, AvgCountLineBlank=1, AvgCountLineCode=7, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.13, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=19, CountLineBlank=3, CountLineCode=15, CountLineCodeDecl=8, CountLineCodeExe=12, CountLineComment=2, CountStmt=15, CountStmtDecl=8, CountStmtExe=12, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2

id: 6,311 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py | class_name: transformers.models.yoso.modeling_yoso.YosoCumulation

human_written_code:
import math
import torch
class YosoCumulation(torch.autograd.Function):
@staticmethod
def forward(ctx, query_mask, key_mask, query, key, value, config):
hash_code_len = config['hash_code_len']
expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len
expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :]
cumulation_value = torch.matmul(expectation, value)
ctx.save_for_backward(query_mask, key_mask, expectation, query, key, value)
ctx.config = config
return cumulation_value
@staticmethod
def backward(ctx, grad):
grad = to_contiguous(grad)
query_mask, key_mask, expectation, query, key, value = ctx.saved_tensors
config = ctx.config
hash_code_len = config['hash_code_len']
weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation
grad_query = torch.matmul(weighted_exp, hash_code_len / 2 * key)
grad_key = torch.matmul(weighted_exp.transpose(-1, -2), hash_code_len / 2 * query)
grad_value = torch.matmul(expectation.transpose(-1, -2), grad)
return (None, None, grad_query, grad_key, grad_value, None)

class_skeleton:
class YosoCumulation(torch.autograd.Function):
@staticmethod
def forward(ctx, query_mask, key_mask, query, key, value, config):
pass
@staticmethod
def backward(ctx, grad):
pass

metrics: total_program_units=5, total_doc_str=0, AvgCountLine=13, AvgCountLineBlank=4, AvgCountLineCode=9, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=32, CountLine=29, CountLineBlank=8, CountLineCode=21, CountLineCodeDecl=15, CountLineCodeExe=16, CountLineComment=0, CountStmt=19, CountStmtDecl=13, CountStmtExe=16, MaxCyclomatic=1, MaxInheritanceTree=5, MaxNesting=0, SumCyclomatic=2
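
`YosoCumulation.forward` above evaluates the YOSO softmax approximation in closed form: for unit-norm queries and keys, `(1 - arccos(q · k) / pi) ** hash_code_len` is the probability that `hash_code_len` random-hyperplane hash bits all agree. A toy evaluation of that kernel on random tensors:

```python
import math
import torch

q = torch.nn.functional.normalize(torch.randn(1, 4, 8), dim=-1)
k = torch.nn.functional.normalize(torch.randn(1, 4, 8), dim=-1)
hash_code_len = 9  # the YosoConfig default shown in row 6,308

cos = torch.matmul(q, k.transpose(-1, -2)).clamp(-1.0, 1.0)  # clamp guards acos against rounding
expectation = (1 - torch.acos(cos) / math.pi) ** hash_code_len
print(expectation.shape)  # torch.Size([1, 4, 4]); every entry lies in [0, 1]
```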

id: 6,312 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py | class_name: transformers.models.yoso.modeling_yoso.YosoEmbeddings

human_written_code:
from torch import nn
import torch
class YosoEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)) + 2, persistent=False)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
if hasattr(self, 'token_type_ids'):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings

class_skeleton:
class YosoEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
pass

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=27, AvgCountLineBlank=4, AvgCountLineCode=21, AvgCountLineComment=3, AvgCyclomatic=4, CommentToCodeRatio=0.17, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=6, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=58, CountLineBlank=9, CountLineCode=42, CountLineCodeDecl=16, CountLineCodeExe=39, CountLineComment=7, CountStmt=34, CountStmtDecl=16, CountStmtExe=31, MaxCyclomatic=7, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=8

id: 6,313 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py | class_name: transformers.models.yoso.modeling_yoso.YosoEncoder

human_written_code:
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
class YosoEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([YosoLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, attention_mask, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutputWithCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)

class_skeleton:
class YosoEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
pass

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=23, AvgCountLineBlank=3, AvgCountLineCode=20, AvgCountLineComment=0, AvgCyclomatic=5, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=47, CountLineBlank=6, CountLineCode=41, CountLineCodeDecl=18, CountLineCodeExe=30, CountLineComment=0, CountStmt=23, CountStmtDecl=10, CountStmtExe=20, MaxCyclomatic=9, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=10

id: 6,314 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py | class_name: transformers.models.yoso.modeling_yoso.YosoForMaskedLM

human_written_code:
import torch
from ...utils import auto_docstring, is_ninja_available, is_torch_cuda_available, logging
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Optional, Union
@auto_docstring
class YosoForMaskedLM(YosoPreTrainedModel):
_tied_weights_keys = ['cls.predictions.decoder.weight', 'cls.predictions.decoder.bias']
def __init__(self, config):
super().__init__(config)
self.yoso = YosoModel(config)
self.cls = YosoOnlyMLMHead(config)
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.yoso(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

class_skeleton:
@auto_docstring
class YosoForMaskedLM(YosoPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
'''
pass

metrics: total_program_units=7, total_doc_str=1, AvgCountLine=16, AvgCountLineBlank=2, AvgCountLineCode=13, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.14, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=2, CountDeclMethod=4, CountDeclMethodAll=5, CountLine=76, CountLineBlank=11, CountLineCode=58, CountLineCodeDecl=27, CountLineCodeExe=35, CountLineComment=8, CountStmt=25, CountStmtDecl=14, CountStmtExe=20, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=8
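
The `labels` contract in the docstring above (`-100` everywhere except supervised positions) matches `CrossEntropyLoss`'s default `ignore_index`. A small sketch of building such a label tensor; the token ids are illustrative:

```python
import torch

input_ids = torch.tensor([[101, 7592, 103, 102]])  # 103 standing in for a [MASK] token
labels = torch.full_like(input_ids, -100)          # -100 positions contribute no loss
labels[0, 2] = 2088                                # supervise only the masked slot
```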

id: 6,315 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py | class_name: transformers.models.yoso.modeling_yoso.YosoForMultipleChoice

human_written_code:
from torch import nn
from ...utils import auto_docstring, is_ninja_available, is_torch_cuda_available, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
@auto_docstring
class YosoForMultipleChoice(YosoPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.yoso = YosoModel(config)
self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size)
self.classifier = nn.Linear(config.hidden_size, 1)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
outputs = self.yoso(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_state = outputs[0]
pooled_output = hidden_state[:, 0]
pooled_output = self.pre_classifier(pooled_output)
pooled_output = nn.ReLU()(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

class_skeleton:
@auto_docstring
class YosoForMultipleChoice(YosoPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
'''
pass

metrics: total_program_units=5, total_doc_str=1, AvgCountLine=38, AvgCountLineBlank=5, AvgCountLineCode=30, AvgCountLineComment=6, AvgCyclomatic=6, CommentToCodeRatio=0.16, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=84, CountLineBlank=10, CountLineCode=67, CountLineCodeDecl=28, CountLineCodeExe=46, CountLineComment=11, CountStmt=30, CountStmtDecl=15, CountStmtExe=27, MaxCyclomatic=11, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=12
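
The forward pass above flattens the choice dimension before encoding and restores it for the loss. A minimal sketch of that reshaping, with illustrative sizes:

```python
import torch

batch_size, num_choices, seq_len = 2, 4, 16
input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))
flat = input_ids.view(-1, input_ids.size(-1))  # (8, 16): one row per (question, choice) pair
logits = torch.randn(flat.size(0), 1)          # the classifier emits one score per pair
reshaped = logits.view(-1, num_choices)        # (2, 4): argmax over dim 1 picks the choice
```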

id: 6,316 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py | class_name: transformers.models.yoso.modeling_yoso.YosoForQuestionAnswering

human_written_code:
from ...utils import auto_docstring, is_ninja_available, is_torch_cuda_available, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
@auto_docstring
class YosoForQuestionAnswering(YosoPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 2
self.num_labels = config.num_labels
self.yoso = YosoModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.yoso(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

class_skeleton:
@auto_docstring
class YosoForQuestionAnswering(YosoPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
pass

metrics: total_program_units=5, total_doc_str=0, AvgCountLine=42, AvgCountLineBlank=5, AvgCountLineCode=31, AvgCountLineComment=7, AvgCyclomatic=4, CommentToCodeRatio=0.19, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=92, CountLineBlank=11, CountLineCode=68, CountLineCodeDecl=30, CountLineCodeExe=46, CountLineComment=13, CountStmt=33, CountStmtDecl=16, CountStmtExe=30, MaxCyclomatic=7, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=8

id: 6,317 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py | class_name: transformers.models.yoso.modeling_yoso.YosoForSequenceClassification

human_written_code:
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils import auto_docstring, is_ninja_available, is_torch_cuda_available, logging
import torch
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
@auto_docstring(custom_intro='\n YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks.\n ')
class YosoForSequenceClassification(YosoPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.yoso = YosoModel(config)
self.classifier = YosoClassificationHead(config)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.yoso(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

class_skeleton:
@auto_docstring(custom_intro='\n YOSO Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks.\n ')
class YosoForSequenceClassification(YosoPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass

metrics: total_program_units=5, total_doc_str=1, AvgCountLine=38, AvgCountLineBlank=3, AvgCountLineCode=32, AvgCountLineComment=4, AvgCyclomatic=7, CommentToCodeRatio=0.1, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=84, CountLineBlank=7, CountLineCode=70, CountLineCodeDecl=25, CountLineCodeExe=49, CountLineComment=7, CountStmt=32, CountStmtDecl=12, CountStmtExe=29, MaxCyclomatic=12, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=13
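
The `problem_type` dispatch in the forward above can be summarized as a pure function. A sketch mirroring that logic:

```python
import torch

def infer_problem_type(num_labels: int, labels: torch.Tensor) -> str:
    # Mirrors the branching in YosoForSequenceClassification.forward above.
    if num_labels == 1:
        return "regression"                   # MSELoss on squeezed logits
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"  # CrossEntropyLoss
    return "multi_label_classification"       # BCEWithLogitsLoss

assert infer_problem_type(1, torch.tensor([0.3])) == "regression"
assert infer_problem_type(3, torch.tensor([2])) == "single_label_classification"
assert infer_problem_type(3, torch.tensor([[1.0, 0.0, 1.0]])) == "multi_label_classification"
```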

id: 6,318 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py | class_name: transformers.models.yoso.modeling_yoso.YosoForTokenClassification

human_written_code:
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils import auto_docstring, is_ninja_available, is_torch_cuda_available, logging
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
import torch
@auto_docstring
class YosoForTokenClassification(YosoPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.yoso = YosoModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.yoso(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels))
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_skeleton:
@auto_docstring
class YosoForTokenClassification(YosoPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass
metrics: total_program_units=5, total_doc_str=1, AvgCountLine=35, AvgCountLineBlank=4, AvgCountLineCode=28, AvgCountLineComment=3, AvgCyclomatic=4, CommentToCodeRatio=0.1, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=78, CountLineBlank=9, CountLineCode=63, CountLineCodeDecl=29, CountLineCodeExe=42, CountLineComment=6, CountStmt=27, CountStmtDecl=16, CountStmtExe=24, MaxCyclomatic=6, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=7
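Editorial note: a self-contained sketch of the active-loss masking in the token-classification forward above, where positions with `attention_mask == 0` are mapped to the loss function's `ignore_index` so padding contributes no gradient. All shapes are illustrative.

```python
import torch
from torch.nn import CrossEntropyLoss

num_labels = 5
logits = torch.randn(2, 4, num_labels)            # (batch, seq_len, num_labels)
labels = torch.randint(0, num_labels, (2, 4))
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])     # 0 marks padding

loss_fct = CrossEntropyLoss()
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, num_labels)
# Padded positions receive ignore_index (-100) and are skipped by the loss.
active_labels = torch.where(
    active_loss, labels.view(-1),
    torch.tensor(loss_fct.ignore_index).type_as(labels),
)
loss = loss_fct(active_logits, active_labels)
```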
id: 6319
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py
class_name: transformers.models.yoso.modeling_yoso.YosoIntermediate
human_written_code:
from ...activations import ACT2FN
from torch import nn
import torch
class YosoIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class_skeleton:
class YosoIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=1, CountLineCode=12, CountLineCodeDecl=5, CountLineCodeExe=9, CountLineComment=0, CountStmt=11, CountStmtDecl=5, CountStmtExe=8, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
id: 6320
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py
class_name: transformers.models.yoso.modeling_yoso.YosoLMPredictionHead
human_written_code:
import torch
from torch import nn
class YosoLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = YosoPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def _tie_weights(self):
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class_skeleton:
class YosoLMPredictionHead(nn.Module):
def __init__(self, config):
pass
def _tie_weights(self):
pass
def forward(self, hidden_states):
pass
metrics: total_program_units=4, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=1, AvgCountLineCode=4, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.23, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=3, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=21, CountLineBlank=5, CountLineCode=13, CountLineCodeDecl=7, CountLineCodeExe=9, CountLineComment=3, CountStmt=13, CountStmtDecl=7, CountStmtExe=9, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3
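Editorial note: a toy illustration (not from the record) of why `_tie_weights` above re-points `decoder.bias` at the head's own `bias` parameter: the tie is the identity of one `nn.Parameter`, and any operation that replaces `decoder.bias` with a fresh tensor silently breaks it until it is re-established.

```python
import torch
from torch import nn

decoder = nn.Linear(8, 100, bias=False)
bias = nn.Parameter(torch.zeros(100))
decoder.bias = bias                      # tie: both names refer to one Parameter
assert decoder.bias is bias

# If something replaces decoder.bias with a fresh Parameter...
decoder.bias = nn.Parameter(torch.zeros(100))
assert decoder.bias is not bias          # ...the tie is broken,
decoder.bias = bias                      # which is what _tie_weights() repairs
```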
id: 6321
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py
class_name: transformers.models.yoso.modeling_yoso.YosoLSHCumulation
human_written_code:
import math
import torch
class YosoLSHCumulation(torch.autograd.Function):
@staticmethod
def forward(ctx, query_mask, key_mask, query, key, value, config):
if query_mask.size(0) != key_mask.size(0):
raise ValueError('Query mask and Key mask differ in sizes in dimension 0')
if query_mask.size(0) != query.size(0):
raise ValueError('Query mask and Query differ in sizes in dimension 0')
if query_mask.size(0) != key.size(0):
raise ValueError('Query mask and Key differ in sizes in dimension 0')
if query_mask.size(0) != value.size(0):
raise ValueError('Query mask and Value mask differ in sizes in dimension 0')
if key.size(1) != value.size(1):
raise ValueError('Key and Value differ in sizes in dimension 1')
if query.size(2) != key.size(2):
raise ValueError('Query and Key differ in sizes in dimension 2')
query_mask, key_mask, query, key, value = to_contiguous([query_mask, key_mask, query, key, value])
use_cuda = query_mask.is_cuda
num_hash = config['num_hash']
hash_code_len = config['hash_code_len']
hashtable_capacity = int(2 ** hash_code_len)
if config['use_fast_hash']:
query_hash_code, key_hash_code = lsh_cumulation.fast_hash(query_mask, query, key_mask, key, num_hash, hash_code_len, use_cuda, 1)
else:
query_hash_code, key_hash_code = hashing(query, key, num_hash, hash_code_len)
cumulation_value = lsh_cumulation.lsh_cumulation(query_mask, query_hash_code, key_mask, key_hash_code, value, hashtable_capacity, use_cuda, 1)
ctx.save_for_backward(query_mask, key_mask, query_hash_code, key_hash_code, query, key, value)
ctx.config = config
return cumulation_value
@staticmethod
def backward(ctx, grad):
grad = to_contiguous(grad)
query_mask, key_mask, query_hash_code, key_hash_code, query, key, value = ctx.saved_tensors
config = ctx.config
use_cuda = grad.is_cuda
hash_code_len = config['hash_code_len']
hashtable_capacity = int(2 ** hash_code_len)
if config['lsh_backward']:
grad_value = lsh_cumulation.lsh_cumulation(key_mask, key_hash_code, query_mask, query_hash_code, grad, hashtable_capacity, use_cuda, 1)
grad_query = lsh_cumulation.lsh_weighted_cumulation(query_mask, query_hash_code, grad, key_mask, key_hash_code, value, hash_code_len / 2 * key, hashtable_capacity, use_cuda, 4)
grad_key = lsh_cumulation.lsh_weighted_cumulation(key_mask, key_hash_code, value, query_mask, query_hash_code, grad, hash_code_len / 2 * query, hashtable_capacity, use_cuda, 4)
else:
expectation = (1 - torch.acos(torch.matmul(query, key.transpose(-1, -2))) / math.pi) ** hash_code_len
expectation = expectation * query_mask[:, :, None] * key_mask[:, None, :]
weighted_exp = torch.matmul(grad, value.transpose(-1, -2)) * expectation
grad_query = torch.matmul(weighted_exp, hash_code_len / 2 * key)
grad_key = torch.matmul(weighted_exp.transpose(-1, -2), hash_code_len / 2 * query)
grad_value = torch.matmul(expectation.transpose(-1, -2), grad)
return (None, None, grad_query, grad_key, grad_value, None)
class_skeleton:
class YosoLSHCumulation(torch.autograd.Function):
@staticmethod
def forward(ctx, query_mask, key_mask, query, key, value, config):
pass
@staticmethod
def backward(ctx, grad):
pass
metrics: total_program_units=5, total_doc_str=0, AvgCountLine=42, AvgCountLineBlank=5, AvgCountLineCode=37, AvgCountLineComment=0, AvgCyclomatic=5, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=32, CountLine=87, CountLineBlank=11, CountLineCode=76, CountLineCodeDecl=21, CountLineCodeExe=71, CountLineComment=0, CountStmt=44, CountStmtDecl=19, CountStmtExe=41, MaxCyclomatic=8, MaxInheritanceTree=5, MaxNesting=1, SumCyclomatic=10
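Editorial note: the non-LSH branch of `backward` above uses the closed-form expectation `(1 - arccos(q·kᵀ)/π) ** hash_code_len`, which is the collision probability of `hash_code_len` independent random-hyperplane (SimHash) bits for unit vectors. A minimal sketch of that quantity; the `clamp` is added here purely for numerical safety and `hash_code_len=8` is illustrative.

```python
import math
import torch
import torch.nn.functional as F

def collision_expectation(query, key, hash_code_len):
    # One random-hyperplane bit agrees with probability 1 - theta/pi,
    # theta = arccos(q . k); hash_code_len independent bits must all agree.
    cos = torch.matmul(query, key.transpose(-1, -2)).clamp(-1.0, 1.0)
    return (1.0 - torch.acos(cos) / math.pi) ** hash_code_len

q = F.normalize(torch.randn(1, 4, 16), dim=-1)    # unit-norm queries
k = F.normalize(torch.randn(1, 6, 16), dim=-1)    # unit-norm keys
probs = collision_expectation(q, k, hash_code_len=8)   # (1, 4, 6), values in [0, 1]
```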
id: 6322
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py
class_name: transformers.models.yoso.modeling_yoso.YosoLayer
human_written_code:
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...modeling_layers import GradientCheckpointingLayer
class YosoLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = YosoAttention(config)
self.add_cross_attention = config.add_cross_attention
self.intermediate = YosoIntermediate(config)
self.output = YosoOutput(config)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
self_attention_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class_skeleton:
class YosoLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
pass
def feed_forward_chunk(self, attention_output):
pass
metrics: total_program_units=4, total_doc_str=0, AvgCountLine=8, AvgCountLineBlank=1, AvgCountLineCode=7, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.05, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=6, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=27, CountLineBlank=5, CountLineCode=22, CountLineCodeDecl=16, CountLineCodeExe=18, CountLineComment=1, CountStmt=20, CountStmtDecl=16, CountStmtExe=16, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3
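Editorial note: a simplified equivalent (not the library helper itself) of what `apply_chunking_to_forward` does in `YosoLayer.forward` above: slice the attention output along the sequence dimension, run the feed-forward chunk by chunk, and concatenate, trading peak activation memory for loop overhead.

```python
import torch
from torch import nn

def chunked_feed_forward(ff, x, chunk_size, seq_len_dim=1):
    # Produces the same output as ff(x); a chunk_size of 0 disables chunking,
    # matching the semantics of config.chunk_size_feed_forward.
    if chunk_size == 0:
        return ff(x)
    chunks = x.split(chunk_size, dim=seq_len_dim)
    return torch.cat([ff(c) for c in chunks], dim=seq_len_dim)

ff = nn.Sequential(nn.Linear(16, 64), nn.GELU(), nn.Linear(64, 16))
x = torch.randn(2, 10, 16)
assert torch.allclose(chunked_feed_forward(ff, x, 5), ff(x), atol=1e-6)
```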
id: 6323
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py
class_name: transformers.models.yoso.modeling_yoso.YosoModel
human_written_code:
import torch
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import auto_docstring, is_ninja_available, is_torch_cuda_available, logging
from typing import Optional, Union
@auto_docstring
class YosoModel(YosoPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = YosoEmbeddings(config)
self.encoder = YosoEncoder(config)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class `PreTrainedModel`.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, 'token_type_ids'):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(embedding_output, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return BaseModelOutputWithCrossAttentions(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
class_skeleton:
@auto_docstring
class YosoModel(YosoPreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class `PreTrainedModel`.
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithCrossAttentions]:
pass
metrics: total_program_units=8, total_doc_str=1, AvgCountLine=19, AvgCountLineBlank=2, AvgCountLineCode=15, AvgCountLineComment=2, AvgCyclomatic=3, CommentToCodeRatio=0.12, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=3, CountDeclMethod=5, CountDeclMethodAll=6, CountLine=105, CountLineBlank=14, CountLineCode=81, CountLineCodeDecl=30, CountLineCodeExe=58, CountLineComment=10, CountStmt=41, CountStmtDecl=18, CountStmtExe=35, MaxCyclomatic=12, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=17
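Editorial note: a hedged end-to-end usage sketch for the `YosoModel` record above. The checkpoint name `uw-madison/yoso-4096` is the reference YOSO checkpoint on the Hugging Face Hub; treat it as an assumption of this sketch.

```python
from transformers import AutoTokenizer, YosoModel

tokenizer = AutoTokenizer.from_pretrained("uw-madison/yoso-4096")  # assumed checkpoint
model = YosoModel.from_pretrained("uw-madison/yoso-4096")

inputs = tokenizer("YOSO approximates softmax attention with LSH.", return_tensors="pt")
outputs = model(**inputs)
last_hidden = outputs.last_hidden_state    # (batch, seq_len, hidden_size)
```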
id: 6324
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py
class_name: transformers.models.yoso.modeling_yoso.YosoOnlyMLMHead
human_written_code:
import torch
from torch import nn
class YosoOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = YosoLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class_skeleton:
class YosoOnlyMLMHead(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=3, AvgCountLineBlank=0, AvgCountLineCode=3, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=8, CountLineBlank=1, CountLineCode=7, CountLineCodeDecl=5, CountLineCodeExe=4, CountLineComment=0, CountStmt=7, CountStmtDecl=5, CountStmtExe=4, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 6325
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py
class_name: transformers.models.yoso.modeling_yoso.YosoOutput
human_written_code:
import torch
from torch import nn
class YosoOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class_skeleton:
class YosoOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=12, CountLineBlank=1, CountLineCode=11, CountLineCodeDecl=6, CountLineCodeExe=8, CountLineComment=0, CountStmt=11, CountStmtDecl=6, CountStmtExe=8, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 6326
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py
class_name: transformers.models.yoso.modeling_yoso.YosoPreTrainedModel
human_written_code:
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, is_ninja_available, is_torch_cuda_available, logging
from .configuration_yoso import YosoConfig
from torch import nn
@auto_docstring
class YosoPreTrainedModel(PreTrainedModel):
config: YosoConfig
base_model_prefix = 'yoso'
supports_gradient_checkpointing = True
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, YosoLMPredictionHead):
module.bias.data.zero_()
class_skeleton:
@auto_docstring
class YosoPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
'''Initialize the weights'''
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=15, AvgCountLineBlank=0, AvgCountLineCode=12, AvgCountLineComment=3, AvgCyclomatic=6, CommentToCodeRatio=0.44, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=6, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=25, CountLineBlank=2, CountLineCode=16, CountLineCodeDecl=5, CountLineCodeExe=14, CountLineComment=7, CountStmt=14, CountStmtDecl=5, CountStmtExe=12, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=6
id: 6327
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py
class_name: transformers.models.yoso.modeling_yoso.YosoPredictionHeadTransform
human_written_code:
from torch import nn
import torch
from ...activations import ACT2FN
class YosoPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class_skeleton:
class YosoPredictionHeadTransform(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=7, AvgCountLineBlank=0, AvgCountLineCode=7, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=15, CountLineBlank=1, CountLineCode=14, CountLineCodeDecl=6, CountLineCodeExe=11, CountLineComment=0, CountStmt=13, CountStmtDecl=6, CountStmtExe=10, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
id: 6328
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py
class_name: transformers.models.yoso.modeling_yoso.YosoSelfAttention
human_written_code:
from ...utils import auto_docstring, is_ninja_available, is_torch_cuda_available, logging
from torch import nn
import torch
class YosoSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
kernel_loaded = lsh_cumulation is not None
if is_torch_cuda_available() and is_ninja_available() and (not kernel_loaded):
try:
load_cuda_kernels()
except Exception as e:
logger.warning(f'Could not load the custom CUDA kernels for YOSO LSH attention: {e}')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type if position_embedding_type is not None else config.position_embedding_type
self.use_expectation = config.use_expectation
self.hash_code_len = config.hash_code_len
self.use_conv = config.conv_window is not None
self.use_fast_hash = config.use_fast_hash
self.num_hash = config.num_hash
self.lsh_backward = config.lsh_backward
self.lsh_config = {'hash_code_len': self.hash_code_len, 'use_fast_hash': self.use_fast_hash, 'num_hash': self.num_hash, 'lsh_backward': self.lsh_backward}
if config.conv_window is not None:
self.conv = nn.Conv2d(in_channels=config.num_attention_heads, out_channels=config.num_attention_heads, kernel_size=(config.conv_window, 1), padding=(config.conv_window // 2, 0), bias=False, groups=config.num_attention_heads)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
if self.use_conv:
conv_value_layer = self.conv(value_layer * attention_mask[:, None, :, None])
batch_size, num_heads, seq_len, head_dim = query_layer.size()
query_layer = query_layer.reshape(batch_size * num_heads, seq_len, head_dim)
key_layer = key_layer.reshape(batch_size * num_heads, seq_len, head_dim)
value_layer = value_layer.reshape(batch_size * num_heads, seq_len, head_dim)
attention_mask = 1.0 + attention_mask / 10000.0
attention_mask = attention_mask.unsqueeze(1).repeat_interleave(num_heads, dim=1).reshape(batch_size * num_heads, seq_len).int()
gpu_warp_size = 32
if not self.use_expectation and head_dim < gpu_warp_size:
pad_size = (batch_size * num_heads, seq_len, gpu_warp_size - head_dim)
query_layer = torch.cat([query_layer, torch.zeros(pad_size, device=query_layer.device)], dim=-1)
key_layer = torch.cat([key_layer, torch.zeros(pad_size, device=key_layer.device)], dim=-1)
value_layer = torch.cat([value_layer, torch.zeros(pad_size, device=value_layer.device)], dim=-1)
if self.use_expectation or self.training:
query_layer, key_layer = normalize([query_layer, key_layer])
if self.use_expectation:
context_layer = YosoCumulation.apply(attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config)
else:
context_layer = YosoLSHCumulation.apply(attention_mask, attention_mask, query_layer, key_layer, value_layer, self.lsh_config)
if not self.use_expectation and head_dim < gpu_warp_size:
context_layer = context_layer[:, :, :head_dim]
context_layer = normalize(context_layer)
context_layer = context_layer.reshape(batch_size, num_heads, seq_len, head_dim)
if self.use_conv:
context_layer += conv_value_layer
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, context_layer) if output_attentions else (context_layer,)
return outputs
class_skeleton:
class YosoSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
pass
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=45, AvgCountLineBlank=8, AvgCountLineCode=37, AvgCountLineComment=1, AvgCyclomatic=5, CommentToCodeRatio=0.02, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=16, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=139, CountLineBlank=25, CountLineCode=112, CountLineCodeDecl=34, CountLineCodeExe=108, CountLineComment=2, CountStmt=67, CountStmtDecl=33, CountStmtExe=63, MaxCyclomatic=8, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=15
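Editorial note: a shape-only sketch of the warp-size padding in `YosoSelfAttention.forward` above: when the per-head dimension is below the GPU warp size (32), queries, keys, and values are zero-padded up to 32 before the LSH kernels run, and the padding is stripped from the result. Dimensions are illustrative.

```python
import torch

gpu_warp_size = 32
batch_heads, seq_len, head_dim = 8, 128, 24       # head_dim < warp size

q = torch.randn(batch_heads, seq_len, head_dim)
if head_dim < gpu_warp_size:
    pad = torch.zeros(batch_heads, seq_len, gpu_warp_size - head_dim)
    q = torch.cat([q, pad], dim=-1)               # (8, 128, 32) for the kernel
# ... LSH kernel would run on the padded tensors here ...
context = q[:, :, :head_dim]                      # strip padding afterwards
```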
id: 6329
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/yoso/modeling_yoso.py
class_name: transformers.models.yoso.modeling_yoso.YosoSelfOutput
human_written_code:
import torch
from torch import nn
class YosoSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class_skeleton:
class YosoSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=12, CountLineBlank=1, CountLineCode=11, CountLineCodeDecl=6, CountLineCodeExe=8, CountLineComment=0, CountStmt=11, CountStmtDecl=6, CountStmtExe=8, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 6330
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/configuration_zamba.py
class_name: transformers.models.zamba.configuration_zamba.ZambaConfig
human_written_code:
import math
from ...configuration_utils import PretrainedConfig
class ZambaConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`ZambaModel`]. It is used to instantiate a
Zamba model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Zamba-v0.1 model.
[Zyphra/Zamba-7B-v1](https://huggingface.co/Zyphra/Zamba-7B-v1)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Zamba model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ZambaModel`]
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
model has an output word embedding layer.
hidden_size (`int`, *optional*, defaults to 3712):
Dimension of the hidden representations.
attention_hidden_size (`int`, *optional*):
Dimension of the hidden representations of the inputs to the Attention layer.
intermediate_size (`int`, *optional*, defaults to 14848):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 76):
Number of hidden layers in the model.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
attention_head_dim (`int`, *optional*):
Dimension of the attention head in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 16):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=None`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245).
n_mamba_heads (`int`, *optional*, defaults to 2):
Number of mamba heads for each mamba layer.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the decoder.
hidden_mamba_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the mamba layer.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
sequence may use a lot of memory, so setting `num_logits_to_keep=1` will reduce the memory footprint
significantly.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
max_position_embeddings (`int`, *optional*, defaults to 4096):
This value doesn't have any real effect. The maximum sequence length that this model is intended to be
used with. It can be used with longer sequences, but performance may degrade.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
attn_layer_period (`int`, *optional*, defaults to 6):
Once in this many layers, we will have a shared attention layer
attn_layer_offset (`int`, *optional*, defaults to 4):
Offset of the shared attention layer
use_mamba_kernels (`bool`, *optional*, defaults to `True`):
Flag indicating whether or not to use the fast mamba kernels. These are available only if `mamba-ssm` and
`causal-conv1d` are installed, and the mamba modules are running on a CUDA device. Raises ValueError if
`True` and kernels are not available
mamba_d_state (`int`, *optional*, defaults to 16):
The dimension of the mamba state space latents
mamba_d_conv (`int`, *optional*, defaults to 4):
The size of the mamba convolution kernel
mamba_expand (`int`, *optional*, defaults to 2):
Expanding factor (relative to hidden_size) used to determine the mamba intermediate size
mamba_dt_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
Rank of the mamba discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
time_step_min (`float`, *optional*, defaults to 0.001):
Minimum `time_step` used to bound `dt_proj_bias`.
time_step_max (`float`, *optional*, defaults to 0.1):
Maximum `time_step` used to bound `dt_proj_bias`.
time_step_floor (`float`, *optional*, defaults to 0.0001):
Minimum clamping value of the `dt_proj.bias` layer initialization.
mamba_conv_bias (`bool`, *optional*, defaults to `True`):
Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block.
mamba_proj_bias (`bool`, *optional*, defaults to `False`):
Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block
"""
model_type = 'zamba'
keys_to_ignore_at_inference = ['past_key_values']
def __init__(self, vocab_size=32000, tie_word_embeddings=True, hidden_size=3712, attention_hidden_size=None, intermediate_size=14848, num_hidden_layers=76, num_attention_heads=16, attention_head_dim=None, num_key_value_heads=16, n_mamba_heads=2, hidden_act='gelu', hidden_mamba_act='silu', initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, num_logits_to_keep=1, pad_token_id=0, bos_token_id=1, eos_token_id=2, max_position_embeddings=4096, attention_dropout=0.0, attn_layer_period=6, attn_layer_offset=4, use_mamba_kernels=True, mamba_d_state=16, mamba_d_conv=4, mamba_expand=2, mamba_dt_rank='auto', time_step_min=0.001, time_step_max=0.1, time_step_floor=0.0001, mamba_conv_bias=True, mamba_proj_bias=False, **kwargs):
self.vocab_size = vocab_size
self.tie_word_embeddings = tie_word_embeddings
self.hidden_size = hidden_size
if attention_hidden_size is None:
self.attention_hidden_size = 2 * hidden_size
else:
self.attention_hidden_size = attention_hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
if attention_head_dim is None:
self.attention_head_dim = 2 * self.hidden_size // self.num_attention_heads
else:
self.attention_head_dim = attention_head_dim
self.max_position_embeddings = max_position_embeddings
self.attention_dropout = attention_dropout
self.num_key_value_heads = num_key_value_heads
self.n_mamba_heads = n_mamba_heads
self.hidden_act = hidden_act
self.hidden_mamba_act = hidden_mamba_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.num_logits_to_keep = num_logits_to_keep
self.attn_layer_period = attn_layer_period
self.attn_layer_offset = attn_layer_offset
self.use_mamba_kernels = use_mamba_kernels
self.mamba_d_state = mamba_d_state
self.mamba_d_conv = mamba_d_conv
self.mamba_expand = mamba_expand
self.mamba_dt_rank = math.ceil(self.hidden_size / 16) if mamba_dt_rank == 'auto' else mamba_dt_rank
self.time_step_min = time_step_min
self.time_step_max = time_step_max
self.time_step_floor = time_step_floor
self.mamba_conv_bias = mamba_conv_bias
self.mamba_proj_bias = mamba_proj_bias
self.layers_block_type = self._layers_block_type(num_hidden_layers, attn_layer_period, attn_layer_offset)
assert self.mamba_expand * self.hidden_size % self.n_mamba_heads == 0, '`intermediate_size` should be divisible by `n_mamba_heads`.'
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
def _layers_block_type(self, num_hidden_layers, attn_layer_period, attn_layer_offset):
layers = ['mamba', 'mamba', 'hybrid'] + ['hybrid' if i % attn_layer_period == attn_layer_offset else 'mamba' for i in range(num_hidden_layers - 3)]
return layers
class_skeleton:
class ZambaConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`ZambaModel`]. It is used to instantiate a
Zamba model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Zamba-v0.1 model.
[Zyphra/Zamba-7B-v1](https://huggingface.co/Zyphra/Zamba-7B-v1)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Zamba model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ZambaModel`]
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
model has an output word embedding layer.
hidden_size (`int`, *optional*, defaults to 3712):
Dimension of the hidden representations.
attention_hidden_size (`int`, *optional*):
Dimension of the hidden representations of the inputs to the Attention layer.
intermediate_size (`int`, *optional*, defaults to 14848):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 76):
Number of hidden layers in the model.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
attention_head_dim (`int`, *optional*):
Dimension of the attention head in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 16):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=None`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245).
n_mamba_heads (`int`, *optional*, defaults to 2):
Number of mamba heads for each mamba layer.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the decoder.
hidden_mamba_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the mamba layer.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
sequence may use a lot of memory, so setting `num_logits_to_keep=1` will reduce the memory footprint
significantly.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
max_position_embeddings (`int`, *optional*, defaults to 4096):
This value doesn't have any real effect. The maximum sequence length that this model is intended to be
used with. It can be used with longer sequences, but performance may degrade.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
attn_layer_period (`int`, *optional*, defaults to 6):
Once in this many layers, we will have a shared attention layer
attn_layer_offset (`int`, *optional*, defaults to 4):
Offset of the shared attention layer
use_mamba_kernels (`bool`, *optional*, defaults to `True`):
Flag indicating whether or not to use the fast mamba kernels. These are available only if `mamba-ssm` and
`causal-conv1d` are installed, and the mamba modules are running on a CUDA device. Raises ValueError if
`True` and kernels are not available
mamba_d_state (`int`, *optional*, defaults to 16):
The dimension of the mamba state space latents
mamba_d_conv (`int`, *optional*, defaults to 4):
The size of the mamba convolution kernel
mamba_expand (`int`, *optional*, defaults to 2):
Expanding factor (relative to hidden_size) used to determine the mamba intermediate size
mamba_dt_rank (`Union[int,str]`, *optional*, defaults to `"auto"`):
Rank of the mamba discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)`
time_step_min (`float`, *optional*, defaults to 0.001):
Minimum `time_step` used to bound `dt_proj_bias`.
time_step_max (`float`, *optional*, defaults to 0.1):
Maximum `time_step` used to bound `dt_proj_bias`.
time_step_floor (`float`, *optional*, defaults to 0.0001):
Minimum clamping value of the `dt_proj.bias` layer initialization.
mamba_conv_bias (`bool`, *optional*, defaults to `True`):
Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block.
mamba_proj_bias (`bool`, *optional*, defaults to `False`):
Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block
'''
def __init__(self, vocab_size=32000, tie_word_embeddings=True, hidden_size=3712, attention_hidden_size=None, intermediate_size=14848, num_hidden_layers=76, num_attention_heads=16, attention_head_dim=None, num_key_value_heads=16, n_mamba_heads=2, hidden_act='gelu', hidden_mamba_act='silu', initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, num_logits_to_keep=1, pad_token_id=0, bos_token_id=1, eos_token_id=2, max_position_embeddings=4096, attention_dropout=0.0, attn_layer_period=6, attn_layer_offset=4, use_mamba_kernels=True, mamba_d_state=16, mamba_d_conv=4, mamba_expand=2, mamba_dt_rank='auto', time_step_min=0.001, time_step_max=0.1, time_step_floor=0.0001, mamba_conv_bias=True, mamba_proj_bias=False, **kwargs):
pass
def _layers_block_type(self, num_hidden_layers, attn_layer_period, attn_layer_offset):
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=49, AvgCountLineBlank=4, AvgCountLineCode=46, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0.96, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=31, CountDeclMethod=2, CountDeclMethodAll=2, CountLine=199, CountLineBlank=15, CountLineCode=94, CountLineCodeDecl=73, CountLineCodeExe=55, CountLineComment=90, CountStmt=44, CountStmtDecl=37, CountStmtExe=41, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=6
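Editorial note: a standalone sketch of the `_layers_block_type` logic above: the first three layers are fixed to mamba, mamba, hybrid, and afterwards every layer whose index satisfies `i % attn_layer_period == attn_layer_offset` becomes hybrid. Run with the config defaults (76 layers, period 6, offset 4).

```python
def layers_block_type(num_hidden_layers, attn_layer_period, attn_layer_offset):
    # Mirrors ZambaConfig._layers_block_type above.
    return ["mamba", "mamba", "hybrid"] + [
        "hybrid" if i % attn_layer_period == attn_layer_offset else "mamba"
        for i in range(num_hidden_layers - 3)
    ]

pattern = layers_block_type(76, 6, 4)
print(pattern[:10])
# ['mamba', 'mamba', 'hybrid', 'mamba', 'mamba', 'mamba', 'mamba', 'hybrid', 'mamba', 'mamba']
```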
id: 6331
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/modeling_zamba.py
class_name: transformers.models.zamba.modeling_zamba.ZambaAttention
human_written_code:
from typing import Any, Callable, Optional, Union
from ...processing_utils import Unpack
from ...utils.deprecation import deprecate_kwarg
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from torch import nn
import torch
from .configuration_zamba import ZambaConfig
class ZambaAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
"""
def __init__(self, config: ZambaConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.attention_hidden_size = config.attention_hidden_size
self.head_dim = config.attention_head_dim
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.scaling = (self.head_dim / 2) ** (-0.5)
self.is_causal = True
self.attention_dropout = config.attention_dropout
self.q_proj = nn.Linear(config.attention_hidden_size, config.num_attention_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(config.attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(config.attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor], past_key_values: Optional[ZambaHybridDynamicCache]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
if past_key_values is not None:
key_states, value_states = past_key_values.update(key_states, value_states, layer_idx)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
class_skeleton:
class ZambaAttention(nn.Module):
'''
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
'''
def __init__(self, config: ZambaConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor], past_key_values: Optional[ZambaHybridDynamicCache]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
metrics: total_program_units=4, total_doc_str=1, AvgCountLine=30, AvgCountLineBlank=4, AvgCountLineCode=26, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0.21, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=3, CountClassDerived=1, CountDeclInstanceMethod=2, CountDeclInstanceVariable=13, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=74, CountLineBlank=10, CountLineCode=53, CountLineCodeDecl=30, CountLineCodeExe=43, CountLineComment=11, CountStmt=33, CountStmtDecl=23, CountStmtExe=30, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=6
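Editorial note: a numeric illustration of the modified scaling in `ZambaAttention` above. Because the attention input is the concatenation of two streams, `head_dim` is twice the effective per-stream dimension, so logits are scaled by `1/sqrt(head_dim/2)` rather than the usual `1/sqrt(head_dim)`. Values use the config defaults shown earlier.

```python
import math

hidden_size = 3712
attention_hidden_size = 2 * hidden_size            # concat of two streams
num_attention_heads = 16
head_dim = attention_hidden_size // num_attention_heads   # 464

standard_scaling = 1 / math.sqrt(head_dim)         # ~0.0464
zamba_scaling = (head_dim / 2) ** -0.5             # ~0.0657, as in the module above
```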
id: 6332
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/modeling_zamba.py
class_name: transformers.models.zamba.modeling_zamba.ZambaAttentionDecoderLayer
human_written_code:
from ...utils.deprecation import deprecate_kwarg
from .configuration_zamba import ZambaConfig
from typing import Any, Callable, Optional, Union
from torch import nn
import torch
from ...processing_utils import Unpack
from ...modeling_flash_attention_utils import FlashAttentionKwargs
class ZambaAttentionDecoderLayer(nn.Module):
def __init__(self, config: ZambaConfig, layer_idx: Optional[int]=None):
super().__init__()
self.self_attn = ZambaAttention(config, layer_idx)
self.feed_forward = ZambaMLP(config)
self.input_layernorm = ZambaRMSNorm(config.attention_hidden_size, eps=config.rms_norm_eps)
self.pre_ff_layernorm = ZambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[ZambaHybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`.
This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The
concatenated tensor is then used as input of the pre-attention RMSNorm
(see fig. 2 in https://huggingface.co/papers/2405.16712).
layer_idx (`int`): layer_idx in the forward pass. Used to distinguish Zamba's tied transformer layers.
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
"""
hidden_states = torch.concatenate([hidden_states, original_hidden_states], dim=-1)
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, layer_idx=layer_idx, attention_mask=attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, **kwargs)
hidden_states = self.pre_ff_layernorm(hidden_states)
hidden_states = self.feed_forward(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
class_skeleton:
class ZambaAttentionDecoderLayer(nn.Module):
def __init__(self, config: ZambaConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[ZambaHybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`.
This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The
concatenated tensor is then used as input of the pre-attention RMSNorm
(see fig. 2 in https://huggingface.co/papers/2405.16712).
layer_idx (`int`): layer_idx in the forward pass. Used to distinguish Zamba's tied transformer layers.
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
'''
pass
metrics: total_program_units=4, total_doc_str=1, AvgCountLine=30, AvgCountLineBlank=2, AvgCountLineCode=17, AvgCountLineComment=11, AvgCyclomatic=2, CommentToCodeRatio=0.6, CountClassBase=1, CountClassCoupled=10, CountClassCoupledModified=6, CountClassDerived=1, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=61, CountLineBlank=5, CountLineCode=35, CountLineCodeDecl=19, CountLineCodeExe=22, CountLineComment=21, CountStmt=17, CountStmtDecl=9, CountStmtExe=14, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
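Editorial note: a shape sketch of the pre-attention concatenation in the decoder layer above: `hidden_states` (previous mamba output) and `original_hidden_states` (word embeddings) are joined on the last dimension, doubling it to `attention_hidden_size` before the input RMSNorm. Shapes are illustrative.

```python
import torch

batch, seq_len, hidden_size = 2, 7, 3712
hidden_states = torch.randn(batch, seq_len, hidden_size)           # previous mamba layer
original_hidden_states = torch.randn(batch, seq_len, hidden_size)  # word embeddings

attn_input = torch.concatenate([hidden_states, original_hidden_states], dim=-1)
assert attn_input.shape == (batch, seq_len, 2 * hidden_size)       # attention_hidden_size
```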
id: 6333
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/modeling_zamba.py
class_name: transformers.models.zamba.modeling_zamba.ZambaForCausalLM
human_written_code:
from ...utils import auto_docstring, logging
from typing import Any, Callable, Optional, Union
from torch import nn
import torch
from .configuration_zamba import ZambaConfig
from ...generation import GenerationMixin
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
class ZambaForCausalLM(ZambaPreTrainedModel, GenerationMixin):
def __init__(self, config: ZambaConfig):
super().__init__(config)
self.model = ZambaModel(config)
self._tied_weights_keys = ['lm_head.weight', *self.model._tied_weights_keys]
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[ZambaHybridDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple, CausalLMOutputWithPast]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, ZambaForCausalLM
>>> model = ZambaForCausalLM.from_pretrained("Zyphra/Zamba-7B-v1")
>>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba-7B-v1")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, return_dict=return_dict)
hidden_states = outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs):
empty_past_kv = past_key_values is None
if not empty_past_kv:
if inputs_embeds is not None or cache_position[-1] >= input_ids.shape[1]:
input_ids = input_ids[:, -cache_position.shape[0]:]
elif input_ids.shape[1] != cache_position.shape[0]:
input_ids = input_ids[:, cache_position]
else:
past_key_values = ZambaHybridDynamicCache(self.config, input_ids.shape[0], dtype=self.dtype, device=self.device)
if attention_mask is not None and position_ids is None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if not empty_past_kv:
position_ids = position_ids[:, -input_ids.shape[1]:]
if inputs_embeds is not None and empty_past_kv:
model_inputs = {'inputs_embeds': inputs_embeds}
else:
model_inputs = {'input_ids': input_ids.contiguous()}
model_inputs.update({'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask, 'logits_to_keep': self.config.num_logits_to_keep, 'cache_position': cache_position})
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
class_skeleton:
class ZambaForCausalLM(ZambaPreTrainedModel, GenerationMixin):
def __init__(self, config: ZambaConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[ZambaHybridDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple, CausalLMOutputWithPast]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, ZambaForCausalLM
>>> model = ZambaForCausalLM.from_pretrained("Zyphra/Zamba-7B-v1")
>>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba-7B-v1")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```'''
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs):
pass
| 5 | 1 | 19 | 2 | 12 | 5 | 2 | 0.35 | 2 | 9 | 4 | 1 | 9 | 6 | 9 | 11 | 181 | 28 | 116 | 49 | 78 | 41 | 52 | 22 | 42 | 8 | 2 | 2 | 22 |
6,334 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/modeling_zamba.py | transformers.models.zamba.modeling_zamba.ZambaForSequenceClassification |
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Any, Callable, Optional, Union
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from ...utils import auto_docstring, logging
import torch
from ...cache_utils import Cache
@auto_docstring(custom_intro='\n The Zamba Model with a sequence classification head on top (linear layer).\n\n [`ZambaForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-2) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ')
class ZambaForSequenceClassification(ZambaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.model = ZambaModel(config)
self._tied_weights_keys = self.model._tied_weights_keys
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutputWithPast]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.model(input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`')
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
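A minimal illustrative sketch of the last-non-pad-token pooling used above, isolated with plain tensors; the padding id and shapes are assumptions:

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0], [8, 9, 0, 0, 0]])  # right-padded batch
logits = torch.randn(2, 5, 3)                                  # (batch, seq_len, num_labels)

non_pad_mask = (input_ids != pad_token_id).int()
token_indices = torch.arange(input_ids.shape[-1])
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)  # tensor([2, 1])
pooled_logits = logits[torch.arange(2), last_non_pad_token]     # (batch, num_labels)
```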
| null | 5 | 1 | 26 | 2 | 22 | 2 | 5 | 0.09 | 1 | 8 | 3 | 1 | 4 | 4 | 4 | 6 | 110 | 11 | 91 | 33 | 73 | 8 | 51 | 20 | 46 | 16 | 2 | 3 | 19 |
6,335 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/modeling_zamba.py | transformers.models.zamba.modeling_zamba.ZambaHybridDynamicCache |
import torch
from typing import Any, Callable, Optional, Union
class ZambaHybridDynamicCache:
"""
A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
(which has a constant shape regardless of seq_len).
This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor depends on the layer type:
For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
"""
is_compileable = False
def __init__(self, config, batch_size, dtype=torch.float16, device=None):
self.dtype = dtype
self.is_compileable = False
self.layers_block_type = config.layers_block_type
self.has_previous_state = False
self.intermediate_size = config.mamba_expand * config.hidden_size
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.n_mamba_heads = config.n_mamba_heads
self.conv_states = []
self.ssm_states = []
self.transformer_layers = []
self._modules = {}
self._parameters = {}
self._buffers = {}
for i in range(config.num_hidden_layers):
self.conv_states += [torch.zeros(batch_size, self.intermediate_size, self.conv_kernel_size, device=device, dtype=dtype)]
cache_shape = (batch_size, self.n_mamba_heads, self.intermediate_size // self.n_mamba_heads, self.ssm_state_size)
self.ssm_states += [torch.zeros(cache_shape, device=device, dtype=dtype)]
if self.layers_block_type[i] == 'hybrid':
self.transformer_layers.append(i)
self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
def __len__(self):
return len(self.key_cache)
def __getitem__(self, layer_idx: int) -> tuple[torch.Tensor, torch.Tensor]:
return (self.key_cache[layer_idx], self.value_cache[layer_idx])
def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[dict[str, Any]]=None) -> tuple[torch.Tensor, torch.Tensor]:
if self.key_cache[layer_idx].shape[-1] == 0:
self.key_cache[layer_idx] = key_states
self.value_cache[layer_idx] = value_states
else:
self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)
return (self.key_cache[layer_idx], self.value_cache[layer_idx])
def reorder_cache(self, beam_idx: torch.LongTensor):
"""Reorders the cache for beam search, given the selected beam indices."""
for layer_idx in range(len(self.key_cache)):
device = self.key_cache[layer_idx].device
self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.value_cache[layer_idx].device
self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.conv_states[layer_idx].device
self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
device = self.ssm_states[layer_idx].device
self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))
def get_seq_length(self, layer_idx: Optional[int]=0) -> int:
"""Returns the sequence length of the cached states. A layer index can be optionally passed."""
layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
if len(self.key_cache) <= layer_idx:
return 0
return self.key_cache[layer_idx].shape[-2]
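A minimal illustrative sketch of the attention-side `update` path above: the empty `(batch, 0)` placeholder is replaced on the first write, and later steps are concatenated along the sequence dimension; shapes are assumptions:

```python
import torch

key_cache = torch.tensor([[]] * 2)          # empty placeholder, shape (2, 0)
prefill = torch.randn(2, 4, 3, 8)           # (batch, num_heads, seq_len, head_dim)
key_cache = prefill if key_cache.shape[-1] == 0 else torch.cat([key_cache, prefill], dim=2)
step = torch.randn(2, 4, 1, 8)              # one decoded token
key_cache = torch.cat([key_cache, step], dim=2)
assert key_cache.shape == (2, 4, 4, 8)      # sequence dim has grown to 4
```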
| null | 7 | 3 | 12 | 1 | 10 | 1 | 2 | 0.3 | 1 | 6 | 0 | 1 | 5 | 15 | 6 | 37 | 92 | 10 | 64 | 33 | 50 | 19 | 49 | 26 | 42 | 3 | 3 | 2 | 12 |
6,336 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/modeling_zamba.py | transformers.models.zamba.modeling_zamba.ZambaHybridLayer |
import torch
from ...utils.deprecation import deprecate_kwarg
from typing import Any, Callable, Optional, Union
from torch import nn
class ZambaHybridLayer(nn.Module):
def __init__(self, shared_transf: ZambaAttentionDecoderLayer, linear: nn.Linear, mamba: ZambaMambaDecoderLayer):
super().__init__()
self.shared_transf = shared_transf
self.linear = linear
self.mamba_decoder = mamba
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[ZambaHybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with
hidden activations to form the input of the shared transformer layer.
layer_idx (`int`): layer number.
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
"""
layer_outputs = self.shared_transf(hidden_states, original_hidden_states=original_hidden_states, layer_idx=layer_idx, attention_mask=causal_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
transformer_hidden_states = layer_outputs[0]
if output_attentions:
self_attn_weights = layer_outputs[1]
transformer_hidden_states = self.linear(transformer_hidden_states)
layer_outputs = self.mamba_decoder(hidden_states, transformer_hidden_states=transformer_hidden_states, attention_mask=attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
if output_attentions:
layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:]
return layer_outputs
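A minimal illustrative sketch of the hybrid data flow above, with stand-in callables in place of the real sub-modules: the shared transformer output is projected by the linear layer, then consumed by the mamba decoder alongside the residual stream:

```python
import torch

hidden_states = torch.randn(2, 5, 8)
shared_transf = lambda h: h + 1.0        # stand-in for the shared attention block
linear = torch.nn.Linear(8, 8)
mamba_decoder = lambda h, t: h + t       # stand-in: merges both streams

transformer_hidden_states = linear(shared_transf(hidden_states))
layer_output = mamba_decoder(hidden_states, transformer_hidden_states)
```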
|
class ZambaHybridLayer(nn.Module):
def __init__(self, shared_transf: ZambaAttentionDecoderLayer, linear: nn.Linear, mamba: ZambaMambaDecoderLayer):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[ZambaHybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with
hidden activations to form the input of the shared transformer layer.
layer_idx (`int`): layer number.
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
'''
pass
| 4 | 1 | 34 | 4 | 22 | 9 | 2 | 0.41 | 1 | 7 | 3 | 1 | 2 | 3 | 2 | 12 | 70 | 8 | 44 | 20 | 30 | 18 | 16 | 9 | 13 | 3 | 1 | 1 | 4 |
6,337 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/modeling_zamba.py | transformers.models.zamba.modeling_zamba.ZambaMLP |
from torch import nn
from ...activations import ACT2FN
class ZambaMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
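A minimal illustrative sketch of the gated MLP above with concrete sizes; `F.silu` stands in for `ACT2FN[config.hidden_act]` and the dimensions are assumptions:

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 8)
gate_proj = torch.nn.Linear(8, 16, bias=False)
up_proj = torch.nn.Linear(8, 16, bias=False)
down_proj = torch.nn.Linear(16, 8, bias=False)

# activated gate elementwise-scales the up projection, then project back down
out = down_proj(F.silu(gate_proj(x)) * up_proj(x))
```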
|
class ZambaMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 7 | 2 | 12 | 14 | 1 | 13 | 11 | 10 | 0 | 13 | 11 | 10 | 1 | 1 | 0 | 2 |
6,338 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/modeling_zamba.py | transformers.models.zamba.modeling_zamba.ZambaMambaDecoderLayer |
from ...utils.deprecation import deprecate_kwarg
from .configuration_zamba import ZambaConfig
from torch import nn
from typing import Any, Callable, Optional, Union
import torch
class ZambaMambaDecoderLayer(nn.Module):
def __init__(self, config: ZambaConfig, layer_idx: int):
super().__init__()
self.mamba = ZambaMambaMixer(config=config, layer_idx=layer_idx)
self.input_layernorm = ZambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.layer_idx = layer_idx
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[ZambaHybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, transformer_hidden_states: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
"""
residual = hidden_states
hidden_states = hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states = self.mamba(hidden_states=hidden_states, cache_params=past_key_values, attention_mask=attention_mask)
self_attn_weights = None
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (past_key_values,)
return outputs
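A minimal illustrative sketch of the pre-norm residual pattern in the forward above, with stand-in callables for the norm and mixer; the optional hybrid-layer stream is summed in before normalization:

```python
import torch

input_layernorm = lambda h: h / h.norm(dim=-1, keepdim=True)  # stand-in for ZambaRMSNorm
mamba = lambda h: 0.5 * h                                     # stand-in for ZambaMambaMixer

hidden_states = torch.randn(2, 5, 8)
transformer_hidden_states = torch.randn(2, 5, 8)

residual = hidden_states
hidden_states = hidden_states + transformer_hidden_states  # merge hybrid stream when present
hidden_states = residual + mamba(input_layernorm(hidden_states))
```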
|
class ZambaMambaDecoderLayer(nn.Module):
def __init__(self, config: ZambaConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[ZambaHybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, transformer_hidden_states: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
'''
pass
| 4 | 1 | 32 | 5 | 19 | 9 | 3 | 0.47 | 1 | 8 | 4 | 1 | 2 | 3 | 2 | 12 | 66 | 10 | 38 | 22 | 22 | 18 | 19 | 9 | 16 | 4 | 1 | 1 | 5 |
6,339 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/modeling_zamba.py | transformers.models.zamba.modeling_zamba.ZambaMambaMixer |
from .configuration_zamba import ZambaConfig
from torch import nn
import torch
from ...activations import ACT2FN
class ZambaMambaMixer(nn.Module):
"""
Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
and is why Mamba is called **selective** state spaces)
This module differs from `transformers.models.mamba.modeling_mamba.MambaMixer` in two ways:
- Added multi-head: the output of `self.in_proj` is split into `self.n_mamba_heads` heads, and each head
undergoes an independent forward pass, identical to the original `MambaMixer`, up until the pre-activations of
`self.out_proj`. The pre-activations, coming from different mamba heads, are then concatenated and fed into `self.out_proj`.
"""
def __init__(self, config: ZambaConfig, layer_idx):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.hidden_size = config.hidden_size
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = config.mamba_expand * config.hidden_size
self.time_step_rank = config.mamba_dt_rank
self.n_mamba_heads = config.n_mamba_heads
self.mamba_head_dim = self.intermediate_size // self.n_mamba_heads
self.use_conv_bias = config.mamba_conv_bias
self.use_bias = config.mamba_proj_bias
self.conv1d = nn.Conv1d(in_channels=self.intermediate_size, out_channels=self.intermediate_size, bias=self.use_conv_bias, kernel_size=self.conv_kernel_size, groups=self.intermediate_size, padding=self.conv_kernel_size - 1)
self.activation = config.hidden_mamba_act
self.act = ACT2FN[config.hidden_mamba_act]
self.use_fast_kernels = config.use_mamba_kernels
self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=self.use_bias)
self.x_proj_weight = nn.Parameter(torch.zeros(self.n_mamba_heads, self.time_step_rank + self.ssm_state_size * 2, self.mamba_head_dim))
self.dt_proj_weight = nn.Parameter((torch.zeros(self.n_mamba_heads, self.mamba_head_dim, self.time_step_rank) - 0.5) * 2 / self.time_step_rank ** 0.5)
self.dt_proj_bias = nn.Parameter(torch.zeros(self.n_mamba_heads, self.mamba_head_dim))
A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :]
A = A.expand(self.intermediate_size, -1).contiguous()
self.A_log = nn.Parameter(torch.log(A).reshape(self.n_mamba_heads, self.mamba_head_dim, -1))
self.D = nn.Parameter(torch.ones(self.n_mamba_heads, self.mamba_head_dim))
self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias)
if not is_fast_path_available:
logger.warning_once('The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)` is None. To install, follow https://github.com/state-spaces/mamba/#installation and https://github.com/Dao-AILab/causal-conv1d. If you want to use the naive implementation, set `use_mamba_kernels=False` in the model config')
def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: ZambaHybridDynamicCache=None, attention_mask=None):
batch_size, seq_len, _ = hidden_states.shape
use_precomputed_states = cache_params is not None and cache_params.has_previous_state and (seq_len == 1)
projected_states = self.in_proj(hidden_states).transpose(1, 2)
hidden_states, gate = projected_states.view(batch_size, -1, 2, seq_len).chunk(2, dim=2)
hidden_states = hidden_states.squeeze(2).contiguous()
gate = gate.squeeze(2)
gate = gate.reshape(batch_size, self.n_mamba_heads, -1, seq_len).transpose(0, 1)
conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
if use_precomputed_states:
hidden_states = causal_conv1d_update(hidden_states.squeeze(-1), cache_params.conv_states[self.layer_idx], conv_weights, self.conv1d.bias, self.activation)
hidden_states = hidden_states.unsqueeze(-1)
else:
if attention_mask is not None and (not torch.all(attention_mask == 1)):
hidden_states = hidden_states * attention_mask.unsqueeze(1)
if cache_params is not None:
conv_states = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
cache_params.conv_states[self.layer_idx].copy_(conv_states)
hidden_states = causal_conv1d_fn(hidden_states, conv_weights, self.conv1d.bias, activation=self.activation)
if attention_mask is not None and (not torch.all(attention_mask == 1)):
hidden_states = hidden_states * attention_mask.unsqueeze(1)
hidden_states = hidden_states.reshape(-1, self.n_mamba_heads, self.mamba_head_dim, seq_len).transpose(0, 1)
ssm_parameters = (self.x_proj_weight[:, None, :, :] @ hidden_states).transpose(-1, -2)
time_step, B, C = torch.split(ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1)
discrete_time_step = self.dt_proj_weight[:, None] @ time_step.transpose(-1, -2)
A = -torch.exp(self.A_log.float())
time_proj_bias = self.dt_proj_bias.float() if self.dt_proj_bias is not None else None
scan_outputs = torch.empty((batch_size, 0, seq_len), device=hidden_states.device, dtype=hidden_states.dtype)
if use_precomputed_states:
for n in range(self.n_mamba_heads):
scan_outputs_ = selective_state_update(cache_params.ssm_states[self.layer_idx][:, n], hidden_states[n, ..., 0], discrete_time_step[n, ..., 0], A[n], B[n, :, 0], C[n, :, 0], self.D[n], gate[n, ..., 0], time_proj_bias[n], dt_softplus=True).unsqueeze(-1)
scan_outputs = torch.cat((scan_outputs, scan_outputs_), dim=1)
else:
ssm_state = torch.empty((batch_size, 0, self.mamba_head_dim, self.ssm_state_size), device=hidden_states.device, dtype=hidden_states.dtype)
for n in range(self.n_mamba_heads):
scan_outputs_, ssm_state_ = selective_scan_fn(hidden_states[n], discrete_time_step[n], A[n], B[n].transpose(1, 2), C[n].transpose(1, 2), self.D[n].float(), gate[n], time_proj_bias[n], delta_softplus=True, return_last_state=True)
scan_outputs = torch.cat((scan_outputs, scan_outputs_), dim=1).contiguous()
ssm_state = torch.cat((ssm_state, ssm_state_.unsqueeze(1)), dim=1)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
return contextualized_states
def slow_forward(self, input_states, cache_params: ZambaHybridDynamicCache=None, attention_mask=None):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
projected_states = self.in_proj(input_states).transpose(1, 2)
hidden_states, gate = projected_states.view(batch_size, -1, 2, seq_len).chunk(2, dim=2)
hidden_states = hidden_states.squeeze(2).contiguous()
gate = gate.squeeze(2)
gate = gate.reshape(batch_size, self.n_mamba_heads, -1, seq_len).transpose(0, 1)
use_cache = isinstance(cache_params, ZambaHybridDynamicCache)
if use_cache and cache_params.ssm_states[self.layer_idx].shape[0] == batch_size:
if self.training:
ssm_state = cache_params.ssm_states[self.layer_idx].clone()
else:
ssm_state = cache_params.ssm_states[self.layer_idx]
ssm_state = ssm_state.to(hidden_states.device)
if cache_params.has_previous_state and seq_len == 1 and (cache_params.conv_states[self.layer_idx].shape[0] == batch_size):
conv_state = cache_params.conv_states[self.layer_idx]
conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
conv_state[:, :, -1] = hidden_states[:, :, 0]
cache_params.conv_states[self.layer_idx] = conv_state
hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
if self.use_conv_bias:
hidden_states += self.conv1d.bias
hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1)
else:
if attention_mask is not None and (not torch.all(attention_mask == 1)):
hidden_states = hidden_states * attention_mask[:, -hidden_states.shape[-1]:].unsqueeze(1)
conv_state = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
cache_params.conv_states[self.layer_idx] = conv_state
hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
if attention_mask is not None and (not torch.all(attention_mask == 1)):
hidden_states = hidden_states * attention_mask[:, -hidden_states.shape[-1]:].unsqueeze(1)
else:
ssm_state = torch.zeros((batch_size, self.n_mamba_heads, self.mamba_head_dim, self.ssm_state_size), device=hidden_states.device, dtype=dtype)
if attention_mask is not None and (not torch.all(attention_mask == 1)):
hidden_states = hidden_states * attention_mask.unsqueeze(1)
hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
if attention_mask is not None and (not torch.all(attention_mask == 1)):
hidden_states = hidden_states * attention_mask.unsqueeze(1)
hidden_states = hidden_states.reshape(-1, self.n_mamba_heads, self.mamba_head_dim, seq_len).transpose(0, 1)
ssm_parameters = (self.x_proj_weight[:, None, :, :] @ hidden_states).transpose(-1, -2)
time_step, B, C = torch.split(ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1)
discrete_time_step = self.dt_proj_weight[:, None] @ time_step.transpose(-1, -2) + self.dt_proj_bias[:, None, :, None]
discrete_time_step = nn.functional.softplus(discrete_time_step)
A = -torch.exp(self.A_log.float())
discrete_A = torch.exp(A[:, None, :, None, :] * discrete_time_step[:, :, :, :, None])
discrete_B = discrete_time_step[:, :, :, :, None] * B[:, :, None, :, :].float()
deltaB_u = discrete_B * hidden_states[:, :, :, :, None].float()
scan_outputs = []
for i in range(seq_len):
ssm_state = discrete_A[:, :, :, i, :].transpose(0, 1) * ssm_state + deltaB_u[:, :, :, i, :].transpose(0, 1)
scan_output = torch.matmul(ssm_state.transpose(0, 1).to(dtype), C[:, :, i, :].unsqueeze(-1))
scan_outputs.append(scan_output[:, :, :, 0])
scan_output = torch.stack(scan_outputs, dim=-1)
scan_output = scan_output + hidden_states * self.D[:, None, :, None]
scan_output = scan_output * self.act(gate)
if use_cache:
cache_params.ssm_states[self.layer_idx] = ssm_state
contextualized_states = self.out_proj(scan_output.transpose(0, 1).reshape(batch_size, -1, seq_len).transpose(1, 2))
return contextualized_states
def forward(self, hidden_states, cache_params: ZambaHybridDynamicCache=None, attention_mask=None):
if self.use_fast_kernels:
if not is_fast_path_available or 'cuda' not in self.x_proj_weight.device.type:
raise ValueError("Fast Mamba kernels are not available. Make sure to they are installed and that the mamba module is on a CUDA device. lease run 'pip install causal-conv1d>=1.2.0' and 'pip install mamba-ssm', or set use_mamba_kernels=False in the model's config.")
return self.cuda_kernels_forward(hidden_states, cache_params, attention_mask=attention_mask)
return self.slow_forward(hidden_states, cache_params, attention_mask=attention_mask)
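A minimal illustrative sketch of the recurrence at the heart of `slow_forward`, reduced to one head and one batch element: the state update is `h_t = Ā_t ⊙ h_{t-1} + B̄_t u_t` with readout `y_t = h_t C_t`; all tensors below are random stand-ins:

```python
import torch

seq_len, head_dim, d_state = 6, 4, 3
discrete_A = torch.rand(seq_len, head_dim, d_state)    # discretized A per step, exp(A * dt)
deltaB_u = torch.randn(seq_len, head_dim, d_state)     # discretized B times the input
C = torch.randn(seq_len, d_state)

ssm_state = torch.zeros(head_dim, d_state)
scan_outputs = []
for t in range(seq_len):
    ssm_state = discrete_A[t] * ssm_state + deltaB_u[t]  # recurrent state update
    scan_outputs.append(ssm_state @ C[t])                # readout with C_t -> (head_dim,)
scan_output = torch.stack(scan_outputs, dim=-1)          # (head_dim, seq_len)
```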
|
class ZambaMambaMixer(nn.Module):
'''
Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
and is why Mamba is called **selective** state spaces)
This module differs from `transformers.models.mamba.modeling_mamba.MambaMixer` in two ways:
- Added multi-head: the output of `self.in_proj` is split into `self.n_mamba_heads` heads, and each head
undergoes an independent forward pass, identical to the original `MambaMixer`, up until the pre-activations of
`self.out_proj`. The pre-activations, coming from different mamba heads, are then concatenated and fed into `self.out_proj`.
'''
def __init__(self, config: ZambaConfig, layer_idx):
pass
def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: ZambaHybridDynamicCache=None, attention_mask=None):
pass
def slow_forward(self, input_states, cache_params: ZambaHybridDynamicCache=None, attention_mask=None):
pass
def forward(self, hidden_states, cache_params: ZambaHybridDynamicCache=None, attention_mask=None):
pass
| 5 | 1 | 65 | 7 | 53 | 5 | 7 | 0.14 | 1 | 6 | 2 | 0 | 4 | 22 | 4 | 14 | 276 | 32 | 214 | 65 | 207 | 31 | 132 | 63 | 127 | 11 | 1 | 3 | 26 |
6,340 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/modeling_zamba.py | transformers.models.zamba.modeling_zamba.ZambaModel |
from ...modeling_attn_mask_utils import AttentionMaskConverter
from .configuration_zamba import ZambaConfig
from torch import nn
from typing import Any, Callable, Optional, Union
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
import torch
from ...utils import auto_docstring, logging
@auto_docstring
class ZambaModel(ZambaPreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`ZambaDecoderLayer`]
Args:
config: ZambaConfig
"""
def __init__(self, config: ZambaConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
block = ZambaAttentionDecoderLayer(config)
mamba_layers = []
linear_layers = []
self.layers_block_type = config.layers_block_type
for i in range(config.num_hidden_layers):
if config.layers_block_type[i] == 'mamba':
mamba_layers.append(ZambaMambaDecoderLayer(config, layer_idx=i))
elif config.layers_block_type[i] == 'hybrid':
linear_layers.append(nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False))
mamba_layers.append(ZambaMambaDecoderLayer(config, layer_idx=i))
mamba_layers = iter(mamba_layers)
linear_layers = iter(linear_layers)
layers = []
self._tied_weights_keys = []
for layer_id, layer_type in enumerate(self.layers_block_type):
if layer_type == 'hybrid':
prefix_name = f'layers.{layer_id}.'
tied_keys = ['shared_transf.self_attn.q_proj.weight', 'shared_transf.self_attn.k_proj.weight', 'shared_transf.self_attn.v_proj.weight', 'shared_transf.self_attn.o_proj.weight', 'shared_transf.feed_forward.gate_proj.weight', 'shared_transf.feed_forward.up_proj.weight', 'shared_transf.feed_forward.down_proj.weight', 'shared_transf.input_layernorm.weight', 'shared_transf.pre_ff_layernorm.weight']
self._tied_weights_keys = [*self._tied_weights_keys, *[prefix_name + key for key in tied_keys]]
layers.append(ZambaHybridLayer(block, next(linear_layers), next(mamba_layers)))
else:
layers.append(next(mamba_layers))
self.layers = nn.ModuleList(layers)
self._attn_implementation = config._attn_implementation
self.final_layernorm = ZambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = False
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[ZambaHybridDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one')
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.')
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = inputs_embeds
original_hidden_states = torch.clone(inputs_embeds)
if use_cache and past_key_values is None:
logger.warning_once('Zamba requires an initialized `ZambaHybridDynamicCache` to return a cache. None was provided, so no cache will be returned.')
if cache_position is None:
cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for layer_idx, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(layer.__call__, hidden_states, original_hidden_states, layer_idx, attention_mask, causal_mask, past_key_values, output_attentions, use_cache, cache_position)
else:
layer_outputs = layer(hidden_states, original_hidden_states=original_hidden_states, layer_idx=layer_idx, attention_mask=attention_mask, causal_mask=causal_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
hidden_states = layer_outputs[0]
if output_attentions:
if layer_outputs[1] is not None:
all_self_attns += (layer_outputs[1],)
hidden_states = self.final_layernorm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if past_key_values and (not past_key_values.has_previous_state):
past_key_values.has_previous_state = True
output = BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns)
return output if return_dict else output.to_tuple()
def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
if self.config._attn_implementation == 'flash_attention_2':
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
dtype, device = (input_tensor.dtype, input_tensor.device)
min_dtype = torch.finfo(dtype).min
sequence_length = input_tensor.shape[1]
target_length = cache_position[-1] + 1
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
if attention_mask.dim() == 2:
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']):
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
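A minimal illustrative sketch of the additive causal mask built by `_update_causal_mask` above, for a small prefill with no padding; sizes are assumptions:

```python
import torch

sequence_length = target_length = 4
min_dtype = torch.finfo(torch.float32).min
cache_position = torch.arange(sequence_length)

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)  # block strictly-future positions
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :]        # (1, 1, seq_len, target_len)
```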
|
@auto_docstring
class ZambaModel(ZambaPreTrainedModel):
'''
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`ZambaDecoderLayer`]
Args:
config: ZambaConfig
'''
def __init__(self, config: ZambaConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[ZambaHybridDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPast]:
pass
def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
pass
| 6 | 1 | 38 | 5 | 32 | 2 | 7 | 0.09 | 1 | 14 | 8 | 1 | 5 | 9 | 5 | 7 | 206 | 30 | 163 | 51 | 144 | 14 | 98 | 38 | 92 | 22 | 2 | 3 | 37 |
6,341 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/modeling_zamba.py | transformers.models.zamba.modeling_zamba.ZambaPreTrainedModel |
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
from ...utils import auto_docstring, logging
from torch import nn
from .configuration_zamba import ZambaConfig
import math
@auto_docstring
class ZambaPreTrainedModel(PreTrainedModel):
config: ZambaConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['ZambaAttentionDecoderLayer', 'ZambaMambaDecoderLayer']
_skip_keys_device_placement = 'past_key_values'
_supports_flash_attn = False
_supports_sdpa = False
_is_stateful = True
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, (nn.Linear, nn.Conv1d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, ZambaRMSNorm):
module.weight.data.fill_(1.0)
elif isinstance(module, ZambaMambaMixer):
module.x_proj_weight.data.normal_(mean=0.0, std=std)
dt_init_std = self.config.mamba_dt_rank ** (-0.5)
nn.init.uniform_(module.dt_proj_weight, -dt_init_std, dt_init_std)
mamba_head_dim = self.config.mamba_expand * self.config.hidden_size // self.config.n_mamba_heads
dt = torch.exp(torch.rand(self.config.n_mamba_heads, mamba_head_dim) * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min)) + math.log(self.config.time_step_min)).clamp(min=self.config.time_step_floor)
inv_dt = dt + torch.log(-torch.expm1(-dt))
module.dt_proj_bias.data.copy_(inv_dt)
A = torch.arange(1, module.ssm_state_size + 1, dtype=torch.float32)[None, :]
A = A.expand(module.intermediate_size, -1).contiguous()
module.A_log.data.copy_(torch.log(A).reshape(module.n_mamba_heads, module.mamba_head_dim, -1))
module.D.data.fill_(1.0)
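A minimal illustrative sketch of the `dt` bias initialization above: the stored value is the softplus-inverse `dt + log(-expm1(-dt))`, so applying softplus at runtime recovers a `dt` inside `[time_step_min, time_step_max]`; the sizes and bounds below are assumptions:

```python
import math
import torch

time_step_min, time_step_max, time_step_floor = 0.001, 0.1, 1e-4
dt = torch.exp(torch.rand(8) * (math.log(time_step_max) - math.log(time_step_min))
               + math.log(time_step_min)).clamp(min=time_step_floor)
inv_dt = dt + torch.log(-torch.expm1(-dt))  # equals softplus^{-1}(dt) = log(expm1(dt))
assert torch.allclose(torch.nn.functional.softplus(inv_dt), dt, atol=1e-6)
```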
|
@auto_docstring
class ZambaPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3 | 0 | 26 | 3 | 20 | 3 | 4 | 0.13 | 1 | 5 | 1 | 3 | 1 | 0 | 2 | 2 | 65 | 7 | 52 | 25 | 40 | 7 | 35 | 17 | 32 | 6 | 1 | 2 | 8 |
6,342 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba/modeling_zamba.py | transformers.models.zamba.modeling_zamba.ZambaRMSNorm |
import torch
from torch import nn
class ZambaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
ZambaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
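A minimal illustrative sketch checking the RMS normalization above against its closed form `x / sqrt(mean(x²) + eps) * weight` on a random input:

```python
import torch

eps = 1e-6
hidden_states = torch.randn(2, 5, 8)
weight = torch.ones(8)

variance = hidden_states.pow(2).mean(-1, keepdim=True)
normed = weight * (hidden_states * torch.rsqrt(variance + eps))
assert torch.allclose(normed, hidden_states / torch.sqrt(variance + eps), atol=1e-6)
```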
|
class ZambaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
ZambaRMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 4 | 1 | 5 | 0 | 4 | 1 | 1 | 0.23 | 1 | 2 | 0 | 1 | 3 | 2 | 3 | 13 | 18 | 2 | 13 | 8 | 9 | 3 | 13 | 8 | 9 | 1 | 1 | 0 | 3 |
6,343 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/configuration_zamba2.py | transformers.models.zamba2.configuration_zamba2.Zamba2Config |
from ...configuration_utils import PretrainedConfig
class Zamba2Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Zamba2Model`]. It is used to instantiate a
Zamba2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Zamba2 model.
[Zyphra/Zamba2-2.7B](https://huggingface.co/Zyphra/Zamba2-2.7B)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Zamba2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Zamba2Model`]
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with.
hidden_size (`int`, *optional*, defaults to 2560):
Dimension of the hidden representations.
num_hidden_layers (`int`, *optional*, defaults to 54):
Number of hidden layers in the model.
layers_block_type (`list`, *optional*):
List of layer types, which can be either "mamba" or "hybrid".
mamba_d_state (`int`, *optional*, defaults to 64): shape of the state space latents.
mamba_d_conv (`int`, *optional*, defaults to 4): Size of the convolution kernel.
mamba_expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.
mamba_ngroups (`int`, *optional*, defaults to 1):
Number of groups for the evolution matrices of mamba 2.
time_step_min (`float`, *optional*, defaults to 0.001):
Minimum `time_step` used to bound `dt_proj.bias`.
time_step_max (`float`, *optional*, defaults to 0.1):
Maximum `time_step` used to bound `dt_proj.bias`.
time_step_floor (`float`, *optional*, defaults to 0.0001):
Minimum clamping value of the `dt_proj.bias` layer initialization.
time_step_limit (`tuple`, *optional*):
Accepted range of time step values.
n_mamba_heads (`int`, *optional*, defaults to 8):
Number of heads for the evolution matrices of mamba 2.
use_conv_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use bias in the convolution layer of the mixer block.
chunk_size (`int`, *optional*, defaults to 256):
Size of the chunks that will comprise the sequence.
use_mem_eff_path (`bool`, *optional*, defaults to `False`):
Whether or not to use the fused conv1d and scan in mamba2 layers.
add_bias_linear (`bool`, *optional*, defaults to `False`):
Flag indicating whether or not to use bias in various layers
intermediate_size (`int`, *optional*, defaults to 4 * hidden_size):
Dimension of the MLP representations.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the MLP.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=None`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245).
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
num_mem_blocks (`int`, *optional*, defaults to 1):
Number of unshared transformer blocks.
use_shared_attention_adapter (`bool`, *optional*, defaults to `False`):
If True, unshared adapters (formally the same as LoRA but used in the base model) will be added to the q, k, v projectors in the shared attention layers.
adapter_rank (`int`, *optional*, defaults to 128):
Rank of the adapter in the shared MLP and shared attention layers.
use_mem_rope (`bool`, *optional*, defaults to `False`):
If True, includes RoPE in the shared attention layers.
rope_theta (`float`, *optional*, defaults to `10000.0`):
The base period of the RoPE embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
sequence may use a lot of memory so, setting `num_logits_to_keep=1` will reduce memory footprint
significantly.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
use_long_context (`bool`, *optional*, defaults to `False`):
Activates the context-extended version of Zamba by modifying RoPE.
```python
>>> from transformers import Zamba2Model, Zamba2Config
>>> # Initializing a Zamba2-2.7B style configuration
>>> configuration = Zamba2Config()
>>> # Initializing a model from the Zamba2-2.7B style configuration
>>> model = Zamba2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'zamba2'
attribute_map = {'head_dim': 'attention_head_dim'}
keys_to_ignore_at_inference = ['past_key_values']
def __init__(self, vocab_size=32000, max_position_embeddings=4096, hidden_size=2560, num_hidden_layers=54, layers_block_type=None, mamba_d_state=64, mamba_d_conv=4, mamba_expand=2, mamba_ngroups=1, time_step_min=0.001, time_step_max=0.1, time_step_floor=0.0001, time_step_limit=None, n_mamba_heads=8, use_conv_bias=True, chunk_size=256, use_mem_eff_path=False, add_bias_linear=False, intermediate_size=None, hidden_act='gelu', num_attention_heads=32, num_key_value_heads=None, attention_dropout=0.0, num_mem_blocks=1, use_shared_attention_adapter=False, adapter_rank=128, use_mem_rope=False, rope_theta=10000, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, num_logits_to_keep=1, pad_token_id=0, bos_token_id=1, eos_token_id=2, use_long_context=False, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
if intermediate_size is None:
self.intermediate_size = 4 * hidden_size
else:
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_mem_blocks = num_mem_blocks
self.attention_hidden_size = 2 * hidden_size
self.attention_head_dim = 2 * self.hidden_size // self.num_attention_heads
self.attention_dropout = attention_dropout
self.use_mem_rope = use_mem_rope
self.use_long_context = use_long_context
if use_mem_rope and use_long_context:
a = 8
rope_theta = rope_theta * a ** (self.attention_head_dim / (self.attention_head_dim - 2))
self.rope_theta = rope_theta
self.mamba_d_state = mamba_d_state
self.mamba_d_conv = mamba_d_conv
self.mamba_expand = mamba_expand
self.add_bias_linear = add_bias_linear
self.mamba_ngroups = mamba_ngroups
self.n_mamba_heads = n_mamba_heads
self.mamba_headdim = int(mamba_expand * hidden_size) // n_mamba_heads
self.use_conv_bias = use_conv_bias
self.chunk_size = chunk_size
self.time_step_limit = time_step_limit
self.use_shared_attention_adapter = use_shared_attention_adapter
self.adapter_rank = adapter_rank
self.time_step_min = time_step_min
self.time_step_max = time_step_max
self.time_step_floor = time_step_floor
if use_long_context:
self.max_position_embeddings = 16384
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.num_attention_heads = num_attention_heads
self.kv_channels = self.hidden_size // self.num_attention_heads
self.num_query_groups = self.num_attention_heads
if layers_block_type is None:
self.layers_block_type = ['mamba'] + (['mamba'] * 5 + ['hybrid']) * 7 + ['mamba'] * 4 + ['hybrid'] + ['mamba'] * 3 + ['hybrid'] + ['mamba'] * 2
else:
self.layers_block_type = layers_block_type
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.num_logits_to_keep = num_logits_to_keep
self.hybrid_layer_ids = [index for index, type in enumerate(self.layers_block_type) if type == 'hybrid']
self.use_mem_eff_path = use_mem_eff_path
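A minimal illustrative sketch of the `use_long_context` RoPE adjustment in the constructor above, computed standalone with the default sizes (assumed here):

```python
hidden_size, num_attention_heads = 2560, 32
attention_head_dim = 2 * hidden_size // num_attention_heads        # 160
rope_theta = 10000 * 8 ** (attention_head_dim / (attention_head_dim - 2))
# theta grows to roughly 8.2e4, and max_position_embeddings is raised to 16384
```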
|
class Zamba2Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Zamba2Model`]. It is used to instantiate a
Zamba2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Zamba2 model.
[Zyphra/Zamba2-2.7B](https://huggingface.co/Zyphra/Zamba2-2.7B)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Zamba2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Zamba2Model`]
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with.
hidden_size (`int`, *optional*, defaults to 2560):
Dimension of the hidden representations.
num_hidden_layers (`int`, *optional*, defaults to 54):
Number of hidden layers in the model.
layers_block_type (`list`, *optional*):
List of layer types, which can be either "mamba" or "hybrid".
mamba_d_state (`int`, *optional*, defaults to 64): shape of the state space latents.
mamba_d_conv (`int`, *optional*, defaults to 4): Size of the convolution kernel.
mamba_expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.
mamba_ngroups (`int`, *optional*, defaults to 1):
Number of groups for the evolution matrices of mamba 2.
time_step_min (`float`, *optional*, defaults to 0.001):
Minimum `time_step` used to bound `dt_proj.bias`.
time_step_max (`float`, *optional*, defaults to 0.1):
Maximum `time_step` used to bound `dt_proj.bias`.
time_step_floor (`float`, *optional*, defaults to 0.0001):
Minimum clamping value of the `dt_proj.bias` layer initialization.
time_step_limit (`tuple`, *optional*):
Accepted range of time step values.
n_mamba_heads (`int`, *optional*, defaults to 8):
Number of heads for the evolution matrices of mamba 2.
use_conv_bias (`bool`, *optional*, defaults to `True`):
Whether or not to use bias in the convolution layer of the mixer block.
chunk_size (`int`, *optional*, defaults to 256):
Size of the chunks that will comprise the sequence.
use_mem_eff_path (`bool`, *optional*, defaults to `False`):
Whether or not to use the fused conv1d and scan in mamba2 layers.
add_bias_linear (`bool`, *optional*, defaults to `False`):
Flag indicating whether or not to use bias in various layers
intermediate_size (`int`, *optional*, defaults to 4 * hidden_size):
Dimension of the MLP representations.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the MLP.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=None`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245).
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
num_mem_blocks (`int`, *optional*, defaults to 1):
Number of unshared transformer blocks.
use_shared_attention_adapter (`bool`, *optional*, defaults to `False`):
If True, unshared adapters (formally the same as LoRA but used in the base model) will be added to the q, k, v projectors in the shared attention layers.
adapter_rank (`int`, *optional*, defaults to 128):
Rank of the adapter in the shared MLP and shared attention layers.
use_mem_rope (`bool`, *optional*, defaults to `False`):
If True, includes RoPE in the shared attention layers.
rope_theta (`float`, *optional*, defaults to `10000.0`):
The base period of the RoPE embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
integer value, only the last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
sequence may use a lot of memory, so setting `num_logits_to_keep=1` will reduce the memory footprint
significantly.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
use_long_context (`bool`, *optional*, defaults to `False`):
Activates the context-extended version of Zamba by modifying RoPE.
```python
>>> from transformers import Zamba2Model, Zamba2Config
>>> # Initializing a Zamba2-2.7B style configuration
>>> configuration = Zamba2Config()
>>> # Initializing a model from the Zamba2-2.7B style configuration
>>> model = Zamba2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=32000, max_position_embeddings=4096, hidden_size=2560, num_hidden_layers=54, layers_block_type=None, mamba_d_state=64, mamba_d_conv=4, mamba_expand=2, mamba_ngroups=1, time_step_min=0.001, time_step_max=0.1, time_step_floor=0.0001, time_step_limit=None, n_mamba_heads=8, use_conv_bias=True, chunk_size=256, use_mem_eff_path=False, add_bias_linear=False, intermediate_size=None, hidden_act='gelu', num_attention_heads=32, num_key_value_heads=None, attention_dropout=0.0, num_mem_blocks=1, use_shared_attention_adapter=False, adapter_rank=128, use_mem_rope=False, rope_theta=10000, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, num_logits_to_keep=1, pad_token_id=0, bos_token_id=1, eos_token_id=2, use_long_context=False, **kwargs):
pass
| total_program_units=2 | total_doc_str=1 | AvgCountLine=108 | AvgCountLineBlank=0 | AvgCountLineCode=107 | AvgCountLineComment=1 | AvgCyclomatic=6 | CommentToCodeRatio=0.89 | CountClassBase=1 | CountClassCoupled=3 | CountClassCoupledModified=0 | CountClassDerived=0 | CountDeclInstanceMethod=1 | CountDeclInstanceVariable=39 | CountDeclMethod=1 | CountDeclMethodAll=1 | CountLine=212 | CountLineBlank=4 | CountLineCode=110 | CountLineCodeDecl=83 | CountLineCodeExe=69 | CountLineComment=98 | CountStmt=56 | CountStmtDecl=44 | CountStmtExe=54 | MaxCyclomatic=6 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=6
|
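The `num_key_value_heads` docstring above describes converting a multi-head checkpoint to a GQA checkpoint by mean-pooling the key/value heads within each group. A sketch of that pooling; `meanpool_kv_heads` and all sizes are illustrative assumptions, not transformers API:

```python
# Illustrative sketch: mean-pool MHA key/value projection heads into GQA groups.
# meanpool_kv_heads is a hypothetical helper, not part of transformers.
import torch

def meanpool_kv_heads(w: torch.Tensor, num_heads: int, num_kv_heads: int, head_dim: int) -> torch.Tensor:
    """w: (num_heads * head_dim, hidden_size) K or V projection weight."""
    hidden_size = w.shape[1]
    group_size = num_heads // num_kv_heads
    w = w.view(num_kv_heads, group_size, head_dim, hidden_size)
    return w.mean(dim=1).reshape(num_kv_heads * head_dim, hidden_size)

w_k = torch.randn(32 * 80, 2560)  # 32 MHA heads of dim 80 (assumed sizes)
w_k_gqa = meanpool_kv_heads(w_k, num_heads=32, num_kv_heads=8, head_dim=80)  # -> (640, 2560)
```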
id: 6,344 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py | class_name: transformers.models.zamba2.modeling_zamba2.Zamba2Attention
from ...utils.deprecation import deprecate_kwarg
import torch
from ...processing_utils import Unpack
from .configuration_zamba2 import Zamba2Config
from typing import Any, Callable, Optional, Union
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
class Zamba2Attention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper.
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
Finally, this attention layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this
layer is tied, un-tied adapter modules (formally the same as LoRA but used in the base model) are added to the q, k, v projectors to increase
expressivity with a small memory overhead (see Fig. 2 of https://huggingface.co/papers/2411.15242).
"""
def __init__(self, config: Zamba2Config, layer_idx: Optional[int]=None, num_fwd_mem_blocks: Optional[int]=None, block_id: Optional[int]=None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.attention_hidden_size = config.attention_hidden_size
self.head_dim = config.attention_head_dim
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.scaling = (self.head_dim / 2) ** (-0.5)
self.is_causal = True
self.attention_dropout = config.attention_dropout
self.q_proj = nn.Linear(config.attention_hidden_size, config.num_attention_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(config.attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(config.attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
self.num_fwd_mem_blocks = num_fwd_mem_blocks
self.layer_block_map = config.hybrid_layer_ids
self.block_id = block_id
if config.use_shared_attention_adapter:
self.linear_q_adapter_list = nn.ModuleList([])
self.linear_k_adapter_list = nn.ModuleList([])
self.linear_v_adapter_list = nn.ModuleList([])
for i in range(self.num_fwd_mem_blocks):
if i % config.num_mem_blocks == block_id:
linear_q_adapter = nn.Sequential(nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False), nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False))
linear_k_adapter = nn.Sequential(nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False), nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False))
linear_v_adapter = nn.Sequential(nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False), nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False))
else:
linear_q_adapter = nn.Identity()
linear_k_adapter = nn.Identity()
linear_v_adapter = nn.Identity()
self.linear_q_adapter_list.append(linear_q_adapter)
self.linear_k_adapter_list.append(linear_k_adapter)
self.linear_v_adapter_list.append(linear_v_adapter)
self.layer_dic = {value: index for index, value in enumerate(self.layer_block_map)}
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
if self.config.use_shared_attention_adapter:
adapter_layer_idx = self.layer_dic[layer_idx]
query_states = query_states + self.linear_q_adapter_list[adapter_layer_idx](hidden_states)
key_states = key_states + self.linear_k_adapter_list[adapter_layer_idx](hidden_states)
value_states = value_states + self.linear_v_adapter_list[adapter_layer_idx](hidden_states)
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
if self.config.use_mem_rope:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
key_states, value_states = past_key_values.update(key_states, value_states, layer_idx)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
|
class Zamba2Attention(nn.Module):
'''
Multi-headed attention from 'Attention Is All You Need' paper.
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
Finally, this attention layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this
layer is tied, un-tied adapter modules (formally the same as LoRA but used in the base model) are added to the q, k, v projectors to increase
expressivity with a small memory overhead (see Fig. 2 of https://huggingface.co/papers/2411.15242).
'''
def __init__(self, config: Zamba2Config, layer_idx: Optional[int]=None, num_fwd_mem_blocks: Optional[int]=None, block_id: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| total_program_units=4 | total_doc_str=1 | AvgCountLine=56 | AvgCountLineBlank=6 | AvgCountLineCode=50 | AvgCountLineComment=0 | AvgCyclomatic=6 | CommentToCodeRatio=0.22 | CountClassBase=1 | CountClassCoupled=8 | CountClassCoupledModified=3 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=20 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=139 | CountLineBlank=17 | CountLineCode=100 | CountLineCodeDecl=50 | CountLineCodeExe=83 | CountLineComment=22 | CountStmt=63 | CountStmtDecl=36 | CountStmtExe=60 | MaxCyclomatic=7 | MaxInheritanceTree=1 | MaxNesting=3 | SumCyclomatic=11
|
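The Zamba2Attention docstring above replaces the usual `1/sqrt(head_dim)` score scaling with `1/sqrt(head_dim/2)`, matching `self.scaling = (self.head_dim / 2) ** (-0.5)` in `__init__`. A minimal eager-path sketch with assumed tensor sizes:

```python
# Eager attention scores with Zamba2's modified scaling factor.
import torch

batch, heads, seq, head_dim = 1, 4, 6, 16     # assumed sizes
q = torch.randn(batch, heads, seq, head_dim)
k = torch.randn(batch, heads, seq, head_dim)
scaling = (head_dim / 2) ** -0.5              # 1/sqrt(head_dim/2), not 1/sqrt(head_dim)
attn_weights = torch.matmul(q, k.transpose(2, 3)) * scaling
attn_probs = attn_weights.softmax(dim=-1)     # (batch, heads, seq, seq)
```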
id: 6,345 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py | class_name: transformers.models.zamba2.modeling_zamba2.Zamba2AttentionDecoderLayer
from ...processing_utils import Unpack
from .configuration_zamba2 import Zamba2Config
from typing import Any, Callable, Optional, Union
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from torch import nn
from ...utils.deprecation import deprecate_kwarg
import torch
class Zamba2AttentionDecoderLayer(nn.Module):
def __init__(self, config: Zamba2Config, block_id: Optional[int]=None, layer_idx: Optional[int]=None):
super().__init__()
self.block_id = block_id
num_gs = len(config.hybrid_layer_ids)
self.self_attn = Zamba2Attention(config, layer_idx=-1, num_fwd_mem_blocks=num_gs, block_id=block_id)
self.feed_forward = Zamba2MLP(config, num_fwd_mem_blocks=num_gs, block_id=block_id)
self.input_layernorm = Zamba2RMSNorm(config.attention_hidden_size, eps=config.rms_norm_eps)
self.pre_ff_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, position_embeddings: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`.
This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The
concatenated tensor is then used as input of the pre-attention RMSNorm
(see fig. 2 in https://huggingface.co/papers/2405.16712).
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
"""
hidden_states = torch.concatenate([hidden_states, original_hidden_states], dim=-1)
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, layer_idx=layer_idx, attention_mask=attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.pre_ff_layernorm(hidden_states)
hidden_states = self.feed_forward(hidden_states, layer_idx)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
|
class Zamba2AttentionDecoderLayer(nn.Module):
def __init__(self, config: Zamba2Config, block_id: Optional[int]=None, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, position_embeddings: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`.
This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The
concatenated tensor is then used as input of the pre-attention RMSNorm
(see fig. 2 in https://huggingface.co/papers/2405.16712).
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
'''
pass
| total_program_units=4 | total_doc_str=1 | AvgCountLine=30 | AvgCountLineBlank=2 | AvgCountLineCode=18 | AvgCountLineComment=10 | AvgCyclomatic=2 | CommentToCodeRatio=0.54 | CountClassBase=1 | CountClassCoupled=10 | CountClassCoupledModified=6 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=5 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=62 | CountLineBlank=5 | CountLineCode=37 | CountLineCodeDecl=21 | CountLineCodeExe=24 | CountLineComment=20 | CountStmt=19 | CountStmtDecl=11 | CountStmtExe=16 | MaxCyclomatic=2 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=3
|
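`Zamba2AttentionDecoderLayer.forward` concatenates the mamba output with the original embedding output before `input_layernorm`, which is why that norm is built on `config.attention_hidden_size` (twice the hidden size). A shape sketch under assumed sizes:

```python
# The shared attention block consumes [hidden_states ; original_hidden_states],
# so its pre-attention norm operates on 2 * hidden_size features. Sizes assumed.
import torch

batch, seq_len, hidden_size = 2, 5, 8
hidden_states = torch.randn(batch, seq_len, hidden_size)           # previous mamba output
original_hidden_states = torch.randn(batch, seq_len, hidden_size)  # embedding output
attn_input = torch.concatenate([hidden_states, original_hidden_states], dim=-1)
assert attn_input.shape == (batch, seq_len, 2 * hidden_size)
```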
id: 6,346 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py | class_name: transformers.models.zamba2.modeling_zamba2.Zamba2ForCausalLM
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from .configuration_zamba2 import Zamba2Config
from typing import Any, Callable, Optional, Union
from torch import nn
from ...generation import GenerationMixin
import torch
from ...utils import auto_docstring, logging
class Zamba2ForCausalLM(Zamba2PreTrainedModel, GenerationMixin):
def __init__(self, config: Zamba2Config):
super().__init__(config)
self.model = Zamba2Model(config)
self._tied_weights_keys = ['lm_head.weight', *self.model._tied_weights_keys]
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple, CausalLMOutputWithPast]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Zamba2ForCausalLM
>>> model = Zamba2ForCausalLM.from_pretrained("Zyphra/Zamba2-7B-v1")
>>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-7B-v1")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, return_dict=return_dict)
hidden_states = outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs):
empty_past_kv = past_key_values is None
if not empty_past_kv:
if inputs_embeds is not None or cache_position[-1] >= input_ids.shape[1]:
input_ids = input_ids[:, -cache_position.shape[0]:]
elif input_ids.shape[1] != cache_position.shape[0]:
input_ids = input_ids[:, cache_position]
else:
past_key_values = Zamba2HybridDynamicCache(self.config, input_ids.shape[0], dtype=self.dtype, device=self.device)
if attention_mask is not None and position_ids is None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if not empty_past_kv:
position_ids = position_ids[:, -input_ids.shape[1]:]
if inputs_embeds is not None and empty_past_kv:
model_inputs = {'inputs_embeds': inputs_embeds}
else:
model_inputs = {'input_ids': input_ids.contiguous()}
model_inputs.update({'position_ids': position_ids, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask, 'logits_to_keep': self.config.num_logits_to_keep, 'cache_position': cache_position})
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
|
class Zamba2ForCausalLM(Zamba2PreTrainedModel, GenerationMixin):
def __init__(self, config: Zamba2Config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple, CausalLMOutputWithPast]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Zamba2ForCausalLM
>>> model = Zamba2ForCausalLM.from_pretrained("Zyphra/Zamba2-7B-v1")
>>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-7B-v1")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```'''
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs):
pass
| total_program_units=5 | total_doc_str=1 | AvgCountLine=19 | AvgCountLineBlank=2 | AvgCountLineCode=12 | AvgCountLineComment=5 | AvgCyclomatic=2 | CommentToCodeRatio=0.35 | CountClassBase=2 | CountClassCoupled=9 | CountClassCoupledModified=4 | CountClassDerived=0 | CountDeclInstanceMethod=9 | CountDeclInstanceVariable=6 | CountDeclMethod=9 | CountDeclMethodAll=10 | CountLine=181 | CountLineBlank=28 | CountLineCode=116 | CountLineCodeDecl=49 | CountLineCodeExe=78 | CountLineComment=41 | CountStmt=52 | CountStmtDecl=22 | CountStmtExe=42 | MaxCyclomatic=8 | MaxInheritanceTree=2 | MaxNesting=2 | SumCyclomatic=22
|
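`Zamba2ForCausalLM.forward` only projects the last `logits_to_keep` positions through `lm_head`, which is what makes `num_logits_to_keep=1` memory-efficient during generation. A sketch with a stand-in head and assumed sizes:

```python
# Only the last logits_to_keep positions are projected through the LM head,
# bounding the (batch, N, vocab_size) logits tensor. Sizes are assumptions.
import torch

hidden_states = torch.randn(2, 128, 16)                 # (batch, seq_len, hidden)
lm_head = torch.nn.Linear(16, 100, bias=False)          # stand-in vocab projection
logits_to_keep = 1
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = lm_head(hidden_states[:, slice_indices, :])    # (2, 1, 100)
```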
id: 6,347 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py | class_name: transformers.models.zamba2.modeling_zamba2.Zamba2ForSequenceClassification
import torch
from ...cache_utils import Cache
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from typing import Any, Callable, Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
@auto_docstring(custom_intro='\n The Zamba2 Model with a sequence classification head on top (linear layer).\n\n [`Zamba2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-2) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ')
class Zamba2ForSequenceClassification(Zamba2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.model = Zamba2Model(config)
self._tied_weights_keys = self.model._tied_weights_keys
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutputWithPast]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.model(input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`')
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
| null
| total_program_units=5 | total_doc_str=1 | AvgCountLine=26 | AvgCountLineBlank=2 | AvgCountLineCode=22 | AvgCountLineComment=2 | AvgCyclomatic=5 | CommentToCodeRatio=0.09 | CountClassBase=1 | CountClassCoupled=8 | CountClassCoupledModified=3 | CountClassDerived=0 | CountDeclInstanceMethod=4 | CountDeclInstanceVariable=4 | CountDeclMethod=4 | CountDeclMethodAll=5 | CountLine=110 | CountLineBlank=11 | CountLineCode=91 | CountLineCodeDecl=33 | CountLineCodeExe=73 | CountLineComment=8 | CountStmt=51 | CountStmtDecl=20 | CountStmtExe=46 | MaxCyclomatic=16 | MaxInheritanceTree=2 | MaxNesting=3 | SumCyclomatic=19
|
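The pooling logic in `Zamba2ForSequenceClassification.forward` above selects the last non-padding position per row when `pad_token_id` is set. A small numeric sketch with assumed token ids and label count:

```python
# Pooling position selection: pick the last token whose id differs from
# pad_token_id, then gather one logit row per batch element. Values assumed.
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 7, 9, 0, 0],
                          [3, 4, 6, 8, 2]])
logits = torch.randn(2, 5, 3)                                    # (batch, seq_len, num_labels)
non_pad_mask = (input_ids != pad_token_id).to(torch.int32)
token_indices = torch.arange(input_ids.shape[-1], dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)   # tensor([2, 4])
pooled_logits = logits[torch.arange(2), last_non_pad_token]      # (2, 3)
```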
id: 6,348 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py | class_name: transformers.models.zamba2.modeling_zamba2.Zamba2HybridDynamicCache
from .configuration_zamba2 import Zamba2Config
from typing import Any, Callable, Optional, Union
import torch
class Zamba2HybridDynamicCache:
"""
A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
(which has a constant shape regardless of seq_len).
This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor depends on the layer type:
For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
"""
is_compileable = False
def __init__(self, config: Zamba2Config, batch_size: int, dtype: torch.dtype=torch.float16, device: Optional[str]=None):
self.dtype = dtype
self.layers_block_type = config.layers_block_type
self.has_previous_state = False
self.intermediate_size = int(config.mamba_expand * config.hidden_size)
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.n_mamba_heads = config.n_mamba_heads
self.transformer_layers = []
self._modules = {}
self._parameters = {}
self._buffers = {}
self.conv_states = {}
self.ssm_states = {}
for i in range(config.num_hidden_layers):
self.conv_states[i] = torch.zeros(batch_size, self.intermediate_size + 2 * config.mamba_ngroups * config.mamba_d_state, self.conv_kernel_size, device=device, dtype=dtype)
self.ssm_states[i] = torch.zeros(batch_size, self.n_mamba_heads, config.mamba_headdim, self.ssm_state_size, device=device, dtype=dtype)
if self.layers_block_type[i] == 'hybrid':
self.transformer_layers.append(i)
self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
def __len__(self):
return len(self.key_cache)
def __getitem__(self, layer_idx: int) -> tuple[torch.Tensor, torch.Tensor]:
return (self.key_cache[layer_idx], self.value_cache[layer_idx])
def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[dict[str, Any]]=None) -> tuple[torch.Tensor, torch.Tensor]:
if self.key_cache[layer_idx].shape[-1] == 0:
self.key_cache[layer_idx] = key_states
self.value_cache[layer_idx] = value_states
else:
self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)
return (self.key_cache[layer_idx], self.value_cache[layer_idx])
def reorder_cache(self, beam_idx: torch.LongTensor):
"""Reorders the cache for beam search, given the selected beam indices."""
for layer_idx in range(len(self.key_cache)):
device = self.key_cache[layer_idx].device
self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.value_cache[layer_idx].device
self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.conv_states[layer_idx].device
self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
device = self.ssm_states[layer_idx].device
self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))
def get_seq_length(self, layer_idx: Optional[int]=0) -> int:
"""Returns the sequence length of the cached states. A layer index can be optionally passed."""
layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0:
return 0
return self.key_cache[layer_idx].shape[-2]
def update_conv_state(self, layer_idx: int, new_conv_state: torch.Tensor, cache_position: torch.LongTensor) -> torch.Tensor:
conv_state = self.conv_states[layer_idx]
cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)
conv_state = conv_state.roll(shifts=-1, dims=-1)
conv_state[:, :, cache_position] = new_conv_state.to(conv_state.device)
self.conv_states[layer_idx].zero_()
self.conv_states[layer_idx] += conv_state
return self.conv_states[layer_idx]
def reset(self):
    # conv_states and ssm_states are dicts of per-layer tensors, so zero each entry in place
    for layer_idx in self.conv_states:
        self.conv_states[layer_idx].zero_()
        self.ssm_states[layer_idx].zero_()
| null
| total_program_units=9 | total_doc_str=3 | AvgCountLine=11 | AvgCountLineBlank=0 | AvgCountLineCode=10 | AvgCountLineComment=1 | AvgCyclomatic=2 | CommentToCodeRatio=0.19 | CountClassBase=1 | CountClassCoupled=7 | CountClassCoupledModified=1 | CountClassDerived=0 | CountDeclInstanceMethod=7 | CountDeclInstanceVariable=15 | CountDeclMethod=8 | CountDeclMethodAll=39 | CountLine=106 | CountLineBlank=12 | CountLineCode=79 | CountLineCodeDecl=39 | CountLineCodeExe=59 | CountLineComment=15 | CountStmt=59 | CountStmtDecl=28 | CountStmtExe=50 | MaxCyclomatic=3 | MaxInheritanceTree=3 | MaxNesting=2 | SumCyclomatic=14
|
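`update_conv_state` above keeps a fixed-width convolution window per layer: it rolls the cached window left by one step and writes the newest column at the (clamped) cache position. A sketch with assumed sizes:

```python
# Rolling window update as in Zamba2HybridDynamicCache.update_conv_state:
# shift the cached window left by one and write the newest column. Sizes assumed.
import torch

conv_kernel_size = 4
conv_state = torch.arange(8.0).view(1, 2, 4)          # (batch, channels, kernel)
new_conv_state = torch.tensor([[[100.0], [200.0]]])   # newest column, (1, 2, 1)
cache_position = torch.tensor([3]).clamp(0, conv_kernel_size - 1)
conv_state = conv_state.roll(shifts=-1, dims=-1)      # drop the oldest column
conv_state[:, :, cache_position] = new_conv_state     # insert the newest column
```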
id: 6,349 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py | class_name: transformers.models.zamba2.modeling_zamba2.Zamba2HybridLayer
from torch import nn
from ...utils.deprecation import deprecate_kwarg
from typing import Any, Callable, Optional, Union
import torch
class Zamba2HybridLayer(nn.Module):
def __init__(self, shared_transformer: Zamba2AttentionDecoderLayer, linear: nn.Linear, mamba: Zamba2MambaDecoderLayer):
super().__init__()
self.linear = linear
self.mamba_decoder = mamba
self.shared_transformer = shared_transformer
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, position_embeddings: Optional[torch.LongTensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with
hidden activations to form the input of the shared transformer layer.
layer_idx (`int`): layer number.
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
"""
layer_outputs = self.shared_transformer(hidden_states, original_hidden_states=original_hidden_states, layer_idx=layer_idx, attention_mask=causal_mask, past_key_values=past_key_values, output_attentions=output_attentions, position_embeddings=position_embeddings)
transformer_hidden_states = layer_outputs[0]
if output_attentions:
self_attn_weights = layer_outputs[1]
transformer_hidden_states = self.linear(transformer_hidden_states)
layer_outputs = self.mamba_decoder(hidden_states, transformer_hidden_states=transformer_hidden_states, attention_mask=attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, position_embeddings=position_embeddings)
if output_attentions:
layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:]
return layer_outputs
|
class Zamba2HybridLayer(nn.Module):
def __init__(self, shared_transformer: Zamba2AttentionDecoderLayer, linear: nn.Linear, mamba: Zamba2MambaDecoderLayer):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, position_embeddings: Optional[torch.LongTensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with
hidden activations to form the input of the shared transformer layer.
layer_idx (`int`): layer number.
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
'''
pass
| total_program_units=4 | total_doc_str=1 | AvgCountLine=35 | AvgCountLineBlank=4 | AvgCountLineCode=22 | AvgCountLineComment=10 | AvgCyclomatic=2 | CommentToCodeRatio=0.42 | CountClassBase=1 | CountClassCoupled=7 | CountClassCoupledModified=3 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=3 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=72 | CountLineBlank=8 | CountLineCode=45 | CountLineCodeDecl=22 | CountLineCodeExe=29 | CountLineComment=19 | CountStmt=16 | CountStmtDecl=9 | CountStmtExe=13 | MaxCyclomatic=3 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=4
|
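`Zamba2HybridLayer.forward` routes the shared transformer block's output through the per-layer `linear` projection and hands it to the mamba decoder as `transformer_hidden_states`. A dataflow sketch in which all three modules are dummy `nn.Linear` stand-ins, not the real blocks:

```python
# Dataflow of Zamba2HybridLayer.forward with stand-in modules: shared attention
# block output -> linear projection -> injected into the mamba decoder's input.
import torch
from torch import nn

hidden_size = 8
shared_transformer = nn.Linear(2 * hidden_size, hidden_size)  # stand-in attention block
linear = nn.Linear(hidden_size, hidden_size)                  # the per-layer projection
mamba = nn.Linear(hidden_size, hidden_size)                   # stand-in mamba decoder

hidden_states = torch.randn(1, 3, hidden_size)
original_hidden_states = torch.randn(1, 3, hidden_size)
transformer_out = shared_transformer(torch.cat([hidden_states, original_hidden_states], dim=-1))
layer_out = mamba(hidden_states + linear(transformer_out))
```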
id: 6,350 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py | class_name: transformers.models.zamba2.modeling_zamba2.Zamba2MLP
import torch
from ...activations import ACT2FN
from .configuration_zamba2 import Zamba2Config
from typing import Any, Callable, Optional, Union
from torch import nn
class Zamba2MLP(nn.Module):
def __init__(self, config: Zamba2Config, num_fwd_mem_blocks=None, block_id: Optional[int]=None):
"""
This MLP layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this layer
is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the up and gate projectors to increase expressivity with a small memory overhead.
"""
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.num_fwd_mem_blocks = num_fwd_mem_blocks
self.block_id = block_id
self.gate_up_proj = nn.Linear(self.hidden_size, 2 * self.intermediate_size, bias=config.add_bias_linear)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear)
self.act_fn = ACT2FN[config.hidden_act]
self.gate_up_proj_adapter_list = nn.ModuleList([])
for i in range(self.num_fwd_mem_blocks):
if i % config.num_mem_blocks == block_id:
gate_up_proj_adapter = nn.Sequential(nn.Linear(self.config.hidden_size, self.config.adapter_rank, bias=False), nn.Linear(self.config.adapter_rank, 2 * self.intermediate_size, bias=False))
else:
gate_up_proj_adapter = nn.Identity()
self.gate_up_proj_adapter_list.append(gate_up_proj_adapter)
layer_block_map = config.hybrid_layer_ids
self.layer_dic = {value: index for index, value in enumerate(layer_block_map)}
def forward(self, hidden_state, layer_idx=None):
gate_up_state = self.gate_up_proj(hidden_state)
layer_idx = self.layer_dic[layer_idx]
gate_up_state = gate_up_state + self.gate_up_proj_adapter_list[layer_idx](hidden_state)
gate_up_state = torch.chunk(gate_up_state, 2, dim=-1)
hidden_state = self.act_fn(gate_up_state[0]) * gate_up_state[1]
output = self.down_proj(hidden_state)
return output
|
class Zamba2MLP(nn.Module):
def __init__(self, config: Zamba2Config, num_fwd_mem_blocks=None, block_id: Optional[int]=None):
'''
This MLP layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this layer
is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the up and gate projectors to increase expressivity with a small memory overhead.
'''
pass
def forward(self, hidden_state, layer_idx=None):
pass
| total_program_units=3 | total_doc_str=1 | AvgCountLine=19 | AvgCountLineBlank=2 | AvgCountLineCode=15 | AvgCountLineComment=2 | AvgCyclomatic=2 | CommentToCodeRatio=0.13 | CountClassBase=1 | CountClassCoupled=5 | CountClassCoupledModified=1 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=10 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=40 | CountLineBlank=5 | CountLineCode=31 | CountLineCodeDecl=18 | CountLineCodeExe=28 | CountLineComment=4 | CountStmt=27 | CountStmtDecl=18 | CountStmtExe=24 | MaxCyclomatic=3 | MaxInheritanceTree=1 | MaxNesting=2 | SumCyclomatic=4
|
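`Zamba2MLP.forward` computes both MLP branches with one fused `gate_up_proj` matmul, splits them with `torch.chunk`, and gates the up branch with the activation. A sketch with assumed sizes (the shared-block adapters are omitted):

```python
# Fused gate/up projection: one matmul produces both branches, torch.chunk
# splits them, and SiLU(gate) * up feeds down_proj. Sizes are assumptions.
import torch
from torch import nn

hidden_size, intermediate_size = 8, 16
gate_up_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)
down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)

x = torch.randn(2, 4, hidden_size)
gate, up = torch.chunk(gate_up_proj(x), 2, dim=-1)
y = down_proj(nn.functional.silu(gate) * up)        # (2, 4, hidden_size)
```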
id: 6,351 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py | class_name: transformers.models.zamba2.modeling_zamba2.Zamba2MambaDecoderLayer
from ...utils.deprecation import deprecate_kwarg
import torch
from .configuration_zamba2 import Zamba2Config
from typing import Any, Callable, Optional, Union
from torch import nn
class Zamba2MambaDecoderLayer(nn.Module):
def __init__(self, config: Zamba2Config, layer_idx: int):
super().__init__()
self.mamba = Zamba2MambaMixer(config=config, layer_idx=layer_idx)
self.input_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.layer_idx = layer_idx
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, transformer_hidden_states: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
"""
residual = hidden_states
hidden_states = hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states = self.mamba(hidden_states=hidden_states, cache_params=past_key_values, attention_mask=attention_mask)
self_attn_weights = None
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (past_key_values,)
return outputs
|
class Zamba2MambaDecoderLayer(nn.Module):
def __init__(self, config: Zamba2Config, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, transformer_hidden_states: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
'''
pass
| total_program_units=4 | total_doc_str=1 | AvgCountLine=32 | AvgCountLineBlank=5 | AvgCountLineCode=19 | AvgCountLineComment=9 | AvgCyclomatic=3 | CommentToCodeRatio=0.47 | CountClassBase=1 | CountClassCoupled=8 | CountClassCoupledModified=4 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=3 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=66 | CountLineBlank=10 | CountLineCode=38 | CountLineCodeDecl=22 | CountLineCodeExe=22 | CountLineComment=18 | CountStmt=19 | CountStmtDecl=9 | CountStmtExe=16 | MaxCyclomatic=4 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=5
|
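`Zamba2MambaDecoderLayer.forward` optionally adds the shared attention block's output to its input before normalization and adds the mixer output back to the original residual. A wiring sketch with dummy stand-ins for the norm and mixer:

```python
# Residual wiring of Zamba2MambaDecoderLayer.forward; norm and mixer below are
# dummy stand-ins for Zamba2RMSNorm / Zamba2MambaMixer, sizes are assumptions.
import torch
from torch import nn

norm = nn.LayerNorm(8)           # stand-in for Zamba2RMSNorm
mixer = nn.Linear(8, 8)          # stand-in for Zamba2MambaMixer
hidden_states = torch.randn(1, 3, 8)
transformer_hidden_states = torch.randn(1, 3, 8)  # None in pure-mamba layers

residual = hidden_states
x = hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states
out = residual + mixer(norm(x))
```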
id: 6,352 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py | class_name: transformers.models.zamba2.modeling_zamba2.Zamba2MambaMixer
from torch import nn
from .configuration_zamba2 import Zamba2Config
from typing import Any, Callable, Optional, Union
import torch
class Zamba2MambaMixer(nn.Module):
"""
Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
and is why Mamba is called **selective** state spaces)
"""
def __init__(self, config: Zamba2Config, layer_idx: Optional[int]=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = int(config.mamba_expand * self.hidden_size)
self.layer_idx = layer_idx
self.use_conv_bias = config.use_conv_bias
self.activation = 'silu'
self.act = nn.SiLU()
self.use_mem_eff_path = config.use_mem_eff_path
self.n_groups = config.mamba_ngroups
self.head_dim = config.mamba_headdim
self.num_heads = self.config.n_mamba_heads
self.chunk_size = config.chunk_size
self.time_step_limit = config.time_step_limit
self.time_step_min = config.time_step_min
self.time_step_max = config.time_step_max
self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
self.conv1d = nn.Conv1d(in_channels=self.conv_dim, out_channels=self.conv_dim, bias=True, kernel_size=config.mamba_d_conv, groups=self.conv_dim, padding=config.mamba_d_conv - 1)
projection_size = self.intermediate_size + self.conv_dim + self.num_heads
self.in_proj = nn.Linear(self.hidden_size, projection_size, bias=config.add_bias_linear)
self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
A = torch.arange(1, self.num_heads + 1)
self.A_log = nn.Parameter(torch.log(A))
self.norm = Zamba2RMSNormGated(self.intermediate_size, group_size=self.intermediate_size // self.n_groups, eps=1e-05)
self.D = nn.Parameter(torch.ones(self.num_heads))
self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear)
if not is_fast_path_available:
logger.warning_once('The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)` is None. Falling back to the naive implementation. To install, follow https://github.com/state-spaces/mamba/#installation and https://github.com/Dao-AILab/causal-conv1d')
def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
batch_size, seq_len, _ = hidden_states.shape
groups_time_state_size = self.n_groups * self.ssm_state_size
d_to_remove = 2 * self.intermediate_size + 2 * self.n_groups * self.ssm_state_size + self.num_heads
if cache_params is not None and cache_params.has_previous_state:
in_projected_states = self.in_proj(hidden_states.squeeze(1))
d_mlp = (in_projected_states.shape[-1] - d_to_remove) // 2
split_projection_dim = [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads]
_, _, gate, hidden_states_B_C, dt = torch.split(in_projected_states, split_projection_dim, dim=-1)
hidden_states_B_C = causal_conv1d_update(hidden_states_B_C, cache_params.conv_states[self.layer_idx], self.conv1d.weight.squeeze(1), self.conv1d.bias, self.activation)
hidden_states, B, C = torch.split(hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1)
A = -torch.exp(self.A_log.float())
A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
dt = dt[:, :, None].expand(-1, -1, self.head_dim)
dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
D = self.D[:, None, ...].expand(-1, self.head_dim)
B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
hidden_states = selective_state_update(cache_params.ssm_states[self.layer_idx], hidden_states_reshaped, dt, A, B, C, D, z=None, dt_bias=dt_bias, dt_softplus=True)
hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
hidden_states = self.norm(hidden_states, gate)
out = self.out_proj(hidden_states)[:, None, ...]
else:
if attention_mask is not None and (not torch.all(attention_mask == 1)):
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
projected_states = self.in_proj(hidden_states)
A = -torch.exp(self.A_log.float())
dt_limit_kwargs = {} if self.time_step_limit is None else {'dt_limit': self.time_step_limit}
if attention_mask is not None:
input_not_masked = torch.all(attention_mask == 1)
else:
input_not_masked = True
if self.use_mem_eff_path and self.training and (cache_params is None) and input_not_masked:
out, ssm_state = mamba_split_conv1d_scan_combined(projected_states, self.conv1d.weight.squeeze(1), self.conv1d.bias, self.dt_bias, A, D=self.D, chunk_size=self.chunk_size, seq_idx=None, activation=self.activation, rmsnorm_weight=self.norm.weight, rmsnorm_eps=self.norm.variance_epsilon, outproj_weight=self.out_proj.weight, outproj_bias=self.out_proj.bias, headdim=self.head_dim, ngroups=self.n_groups, norm_before_gate=False, return_final_states=True, **dt_limit_kwargs)
else:
gate, hidden_states_B_C, time_step = torch.split(projected_states, [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1)
if cache_params is not None:
hidden_states_B_C_t = hidden_states_B_C.transpose(1, 2)
conv_state = nn.functional.pad(hidden_states_B_C_t, (self.conv_kernel_size - hidden_states_B_C_t.shape[-1], 0))
cache_params.conv_states[self.layer_idx].copy_(conv_state)
if causal_conv1d_fn is None or self.activation not in ['silu', 'swish']:
hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2)).transpose(1, 2)[:, :seq_len])
else:
hidden_states_B_C = causal_conv1d_fn(x=hidden_states_B_C.transpose(1, 2), weight=self.conv1d.weight.squeeze(1), bias=self.conv1d.bias, activation=self.activation).transpose(1, 2)[:, :seq_len]
hidden_states, B, C = torch.split(hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1)
if attention_mask is not None and (not torch.all(attention_mask == 1)):
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
scan_output, ssm_state = mamba_chunk_scan_combined(hidden_states.view(batch_size, seq_len, -1, self.head_dim), time_step, A, B.view(batch_size, seq_len, self.n_groups, -1), C.view(batch_size, seq_len, self.n_groups, -1), chunk_size=self.chunk_size, D=self.D, z=None, seq_idx=None, return_final_states=True, dt_bias=self.dt_bias, dt_softplus=True, **dt_limit_kwargs)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
scan_output = scan_output.view(batch_size, seq_len, -1)
scan_output = self.norm(scan_output, gate)
out = self.out_proj(scan_output)
return out
def torch_forward(self, input_states, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
if cache_params is not None and cache_params.has_previous_state:
projected_states = self.in_proj(input_states.squeeze(1))
else:
if attention_mask is not None and (not torch.all(attention_mask == 1)):
input_states = (input_states * attention_mask[:, :, None]).to(dtype)
projected_states = self.in_proj(input_states)
d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size - self.num_heads) // 2
_, _, gate, hidden_states, dt = projected_states.split([d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1)
if cache_params is not None:
ssm_state = cache_params.ssm_states[self.layer_idx].clone()
ssm_state = ssm_state.to(hidden_states.device)
if cache_params.has_previous_state:
gate = gate.unsqueeze(1)
conv_state = cache_params.conv_states[self.layer_idx]
conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
conv_state[:, :, -1] = hidden_states[:, 0, :] if hidden_states.ndim == 3 else hidden_states
cache_params.conv_states[self.layer_idx].copy_(conv_state)
hidden_states = torch.sum(conv_state.to(projected_states.device) * self.conv1d.weight[:, 0, :], dim=-1)
if self.use_conv_bias:
hidden_states += self.conv1d.bias
hidden_states = self.act(hidden_states).to(dtype)[:, None, ...]
else:
hidden_states = hidden_states.transpose(1, 2)
conv_state = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
cache_params.conv_states[self.layer_idx].copy_(conv_state)
hidden_states = self.act(self.conv1d(hidden_states).transpose(1, 2))[:, :seq_len, :]
if attention_mask is not None and (not torch.all(attention_mask == 1)):
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
else:
ssm_state = torch.zeros((batch_size, self.num_heads, self.head_dim, self.ssm_state_size), device=hidden_states.device, dtype=dtype)
hidden_states = self.act(self.conv1d(hidden_states.transpose(1, 2))[..., :seq_len].transpose(1, 2))
hidden_states, B, C = torch.split(hidden_states, [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], dim=-1)
A = -torch.exp(self.A_log.float())
if cache_params is not None and cache_params.has_previous_state:
dt = dt[:, None, ...] if dt.ndim == 2 else dt[:, 0, :][:, None, ...]
dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
dt = torch.clamp(dt, self.time_step_min)
A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
dA = torch.exp(dt[..., None] * A)
B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
B = B.reshape(batch_size, -1, B.shape[-1])
dB = dt[..., None] * B[..., None, :]
hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
dBx = dB * hidden_states[..., None]
cache_params.ssm_states[self.layer_idx].copy_(cache_params.ssm_states[self.layer_idx] * dA + dBx)
C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
C = C.reshape(batch_size, -1, C.shape[-1])
ssm_states = cache_params.ssm_states[self.layer_idx].to(C.dtype)
ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size)
C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1)
y = torch.bmm(ssm_states_reshaped, C_reshaped)
y = y.view(batch_size, self.num_heads, self.head_dim)
D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
y = (y + hidden_states * D).to(y.dtype)
y = y.reshape(batch_size, -1)[:, None, ...]
else:
dt = nn.functional.softplus(dt + self.dt_bias)
dt = torch.clamp(dt, self.time_step_min)
hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
hidden_states = hidden_states * dt[..., None]
A = A.to(hidden_states.dtype) * dt
hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
A = A.permute(0, 3, 1, 2)
A_cumsum = torch.cumsum(A, dim=-1)
L = torch.exp(segment_sum(A))
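# L is the lower-triangular intra-chunk decay matrix: L[..., i, j] = exp(sum_{k=j+1..i} A_k)
# for j <= i and 0 above the diagonal, so Y_diag below is the causal within-chunk contribution.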
G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :]
G = G_intermediate.sum(dim=-1)
M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
M = M_intermediate.sum(dim=-1)
Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(3)
decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum)
B_decay_contraction = B * decay_states.permute(0, 2, 3, 1)[..., None]
states = (B_decay_contraction.permute(0, 1, 3, 2, 4)[..., None] * hidden_states.permute(0, 1, 3, 2, 4)[..., None, :]).sum(dim=3).permute(0, 1, 2, 4, 3)
if cache_params is not None and cache_params.has_previous_state:
previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...]
else:
previous_states = torch.zeros_like(states[:, :1])
states = torch.cat([previous_states, states], dim=1)
decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
states_permuted = states.permute(0, 2, 1, 3, 4)
result = (decay_chunk[..., None, None] * states_permuted[:, :, None, ...]).sum(dim=2)
new_states = result.permute(0, 2, 1, 3, 4)
states, ssm_state = (new_states[:, :-1], new_states[:, -1])
state_decay_out = torch.exp(A_cumsum)
C_times_states = C[..., None, :] * states[:, :, None, ...]
state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
Y_off = C_times_states.sum(-1) * state_decay_out_permuted[..., None]
y = Y_diag + Y_off
y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
y = y + D_residual
if pad_size > 0:
y = y[:, :seq_len, :, :]
y = y.reshape(batch_size, seq_len, -1)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
scan_output = self.norm(y, gate)
contextualized_states = self.out_proj(scan_output.to(dtype))
return contextualized_states
def forward(self, hidden_states, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
if is_fast_path_available and 'cuda' in self.in_proj.weight.device.type:
return self.cuda_kernels_forward(hidden_states, cache_params, attention_mask)
return self.torch_forward(hidden_states, cache_params, attention_mask)
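A minimal sketch (not part of the record above) of the rolling depthwise-conv cache used in `torch_forward` during single-token decoding: the K-wide causal convolution is evaluated as a dot product between the rolled per-channel cache and the flattened conv weight. All sizes below are toy assumptions.
import torch

batch, channels, kernel = 2, 8, 4
conv_state = torch.zeros(batch, channels, kernel)   # cached last K inputs per channel
weight = torch.randn(channels, kernel)              # plays the role of conv1d.weight[:, 0, :]
new_token = torch.randn(batch, channels)

conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
conv_state[:, :, -1] = new_token                    # append the newest input
y = torch.sum(conv_state * weight, dim=-1)          # causal conv output for this step
print(y.shape)  # torch.Size([2, 8])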
|
class Zamba2MambaMixer(nn.Module):
'''
Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
and is why Mamba is called **selective** state spaces)
'''
def __init__(self, config: Zamba2Config, layer_idx: Optional[int]=None):
pass
def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
pass
def torch_forward(self, input_states, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
pass
def forward(self, hidden_states, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
pass
| 5
| 1
| 104
| 11
| 79
| 18
| 7
| 0.25
| 1
| 6
| 3
| 0
| 4
| 25
| 4
| 14
| 428
| 49
| 315
| 105
| 300
| 80
| 198
| 95
| 193
| 13
| 1
| 3
| 27
|
6,353
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py
|
transformers.models.zamba2.modeling_zamba2.Zamba2Model
|
from torch import nn
import re
import torch
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from .configuration_zamba2 import Zamba2Config
from typing import Any, Callable, Optional, Union
from itertools import cycle
from ...modeling_attn_mask_utils import AttentionMaskConverter
@auto_docstring
class Zamba2Model(Zamba2PreTrainedModel):
"""
Model consisting of *config.num_hidden_layers* layers.
Args:
config: Zamba2Config
"""
def __init__(self, config: Zamba2Config):
super().__init__(config)
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
blocks = [Zamba2AttentionDecoderLayer(config, block_id=k) for k in range(config.num_mem_blocks)]
mamba_layers = []
linear_layers = []
self.layers_block_type = config.layers_block_type
for i in range(config.num_hidden_layers):
if config.layers_block_type[i] == 'mamba':
mamba_layers.append(Zamba2MambaDecoderLayer(config, layer_idx=i))
elif config.layers_block_type[i] == 'hybrid':
linear_layers.append(nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False))
mamba_layers.append(Zamba2MambaDecoderLayer(config, layer_idx=i))
mamba_layers = iter(mamba_layers)
linear_layers = iter(linear_layers)
blocks = cycle(blocks)
layers = self.get_layers(blocks, linear_layers, mamba_layers)
self.layers = nn.ModuleList(layers)
self._attn_implementation = config._attn_implementation
self.final_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
if config.use_mem_rope:
if config.use_long_context:
logger.warning_once('`use_long_context` set to `True`: using rescaled `rope_theta` and extended `max_position_embeddings`.')
self.rotary_emb = Zamba2RotaryEmbedding(config)
self.gradient_checkpointing = False
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one')
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.')
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = inputs_embeds
original_hidden_states = torch.clone(inputs_embeds)
if use_cache and past_key_values is None:
batch_size = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
past_key_values = Zamba2HybridDynamicCache(self.config, batch_size, dtype=self.dtype, device=self.device)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length(layer_idx=self.first_transformer_layer_id) if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
if self.config.use_mem_rope:
position_embeddings = self.rotary_emb(hidden_states, position_ids)
else:
position_embeddings = None
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for layer_idx, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(layer.__call__, hidden_states, original_hidden_states, layer_idx, attention_mask, causal_mask, past_key_values, output_attentions, use_cache, position_embeddings)
else:
layer_outputs = layer(hidden_states, original_hidden_states=original_hidden_states, layer_idx=layer_idx, attention_mask=attention_mask, causal_mask=causal_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, position_embeddings=position_embeddings)
hidden_states = layer_outputs[0]
if output_attentions:
if layer_outputs[1] is not None:
all_self_attns += (layer_outputs[1],)
hidden_states = self.final_layernorm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if past_key_values is not None and (not past_key_values.has_previous_state):
past_key_values.has_previous_state = True
output = BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns)
return output if return_dict else output.to_tuple()
def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
if self.config._attn_implementation == 'flash_attention_2':
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
dtype, device = (input_tensor.dtype, input_tensor.device)
min_dtype = torch.finfo(dtype).min
sequence_length = input_tensor.shape[1]
target_length = cache_position[-1] + 1
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
if attention_mask.dim() == 2:
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']):
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
def get_layers(self, blocks, linear_layers, mamba_layers):
layers = []
self._tied_weights_keys = []
self.first_transformer_layer_id = 0
for layer_id, layer_type in enumerate(self.layers_block_type):
if layer_type == 'hybrid':
if self.first_transformer_layer_id == 0:
self.first_transformer_layer_id = layer_id
block = next(blocks)
if self.config.num_mem_blocks * len(self.config.hybrid_layer_ids) > 1:
prefix_pattern = f'^layers\\.{layer_id}\\.shared_transformer\\.'
main_keys_pattern = re.compile(prefix_pattern + '(?:' + 'self_attn\\.(?:q_proj|k_proj|v_proj|o_proj)\\.weight|' + 'feed_forward\\.(?:gate_up_proj|down_proj)\\.weight|' + '(?:input_layernorm|pre_ff_layernorm)\\.weight' + ')$')
self._tied_weights_keys.append(main_keys_pattern)
adapter_id = 0
for _layer_type in self.layers_block_type:
if _layer_type == 'hybrid' and adapter_id % self.config.num_mem_blocks == block.block_id:
adapter_pattern = re.compile('^shared_transformer\\.feed_forward\\.gate_up_proj_adapter_list\\.' + str(adapter_id) + '\\.(?:0|1)\\.weight$')
self._tied_weights_keys.append(adapter_pattern)
adapter_id += 1
if self.config.use_shared_attention_adapter:
adapter_id = 0
for _layer_type in self.layers_block_type:
if _layer_type == 'hybrid' and adapter_id % self.config.num_mem_blocks == block.block_id:
attn_adapter_pattern = re.compile('^shared_transformer\\.self_attn\\.' + '(?:linear_q_adapter_list|linear_k_adapter_list|linear_v_adapter_list)\\.' + str(adapter_id) + '\\.(?:0|1)\\.weight$')
self._tied_weights_keys.append(attn_adapter_pattern)
adapter_id += 1
layers.append(Zamba2HybridLayer(block, next(linear_layers), next(mamba_layers)))
else:
layers.append(next(mamba_layers))
return layers
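A hedged illustration of the layer-assembly pattern in `__init__`/`get_layers` above: the shared attention blocks are cycled across all 'hybrid' positions, while every position consumes its own mamba module (and, for hybrids, its own linear) in order. The toy configuration below is an assumption for illustration only.
from itertools import cycle

layers_block_type = ["mamba", "hybrid", "mamba", "hybrid"]   # assumed toy config
blocks = cycle(["shared_block_0", "shared_block_1"])         # num_mem_blocks = 2
mamba_layers = iter([f"mamba_{i}" for i in range(4)])        # one per layer
linear_layers = iter(["linear_1", "linear_3"])               # one per hybrid layer

layers = []
for layer_type in layers_block_type:
    if layer_type == "hybrid":
        layers.append(("hybrid", next(blocks), next(linear_layers), next(mamba_layers)))
    else:
        layers.append(next(mamba_layers))
print(layers)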
|
@auto_docstring
class Zamba2Model(Zamba2PreTrainedModel):
'''
Model consisting of *config.num_hidden_layers* layers.
Args:
config: Zamba2Config
'''
def __init__(self, config: Zamba2Config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPast]:
pass
def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
pass
def get_layers(self, blocks, linear_layers, mamba_layers):
pass
| 7
| 1
| 40
| 4
| 34
| 2
| 8
| 0.07
| 1
| 17
| 9
| 0
| 6
| 13
| 6
| 7
| 253
| 33
| 207
| 65
| 187
| 14
| 130
| 51
| 123
| 25
| 2
| 6
| 50
|
6,354
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py
|
transformers.models.zamba2.modeling_zamba2.Zamba2PreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_zamba2 import Zamba2Config
import torch
import math
class Zamba2PreTrainedModel(PreTrainedModel):
config: Zamba2Config
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['Zamba2AttentionDecoderLayer', 'Zamba2MambaDecoderLayer']
_skip_keys_device_placement = 'past_key_values'
_supports_flash_attn = True
_supports_flex_attn = True
_supports_sdpa = True
_is_stateful = True
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Zamba2MambaMixer):
dt = torch.exp(torch.rand(self.config.n_mamba_heads) * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min)) + math.log(self.config.time_step_min)).clamp(min=self.config.time_step_floor)
inv_dt = dt + torch.log(-torch.expm1(-dt))
module.dt_bias.data.copy_(inv_dt)
A = torch.arange(1, module.num_heads + 1)
module.A_log.data.copy_(torch.log(A))
module.D.data.fill_(1.0)
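A quick check (a sketch, not part of the record) of the inverse-softplus trick used for `dt_bias` above: storing inv_dt = dt + log(-expm1(-dt)) means softplus(inv_dt) recovers dt exactly, so the bias is stored in pre-activation space.
import torch

dt = torch.tensor([0.001, 0.01, 0.1, 1.0])
inv_dt = dt + torch.log(-torch.expm1(-dt))
print(torch.allclose(torch.nn.functional.softplus(inv_dt), dt, atol=1e-6))  # True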
|
class Zamba2PreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 2
| 0
| 25
| 2
| 22
| 1
| 6
| 0.06
| 1
| 1
| 1
| 3
| 1
| 0
| 1
| 1
| 37
| 3
| 33
| 15
| 31
| 2
| 27
| 15
| 25
| 6
| 1
| 2
| 6
|
6,355
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py
|
transformers.models.zamba2.modeling_zamba2.Zamba2RMSNorm
|
import torch
from torch import nn
class Zamba2RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
Zamba2RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
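A numerical sanity check (assuming the `Zamba2RMSNorm` definition above, with its ones-initialized weight): the module computes y = w * x / sqrt(mean(x^2) + eps) in float32 and casts back to the input dtype.
import torch

norm = Zamba2RMSNorm(hidden_size=4, eps=1e-6)
x = torch.randn(2, 3, 4)
expected = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
print(torch.allclose(norm(x), expected, atol=1e-6))  # True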
|
class Zamba2RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
Zamba2RMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 4
| 1
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 2
| 0
| 0
| 3
| 2
| 3
| 13
| 18
| 2
| 13
| 8
| 9
| 3
| 13
| 8
| 9
| 1
| 1
| 0
| 3
|
6,356
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modeling_zamba2.py
|
transformers.models.zamba2.modeling_zamba2.Zamba2RotaryEmbedding
|
from .configuration_zamba2 import Zamba2Config
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
import torch
from torch import nn
class Zamba2RotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: Zamba2Config, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
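A minimal sketch of the frequency outer product in `forward` above (head_dim=8 and rope_theta=10000 are illustrative assumptions): `inv_freq @ position_ids` gives the rotation angles, which are duplicated across the two halves of the head dimension before taking cos/sin.
import torch

head_dim, theta = 8, 10000.0
inv_freq = 1.0 / theta ** (torch.arange(0, head_dim, 2).float() / head_dim)    # (head_dim/2,)
position_ids = torch.arange(6).float()[None, :]                                # (1, seq_len)
freqs = (inv_freq[None, :, None] @ position_ids[:, None, :]).transpose(1, 2)   # (1, seq_len, head_dim/2)
emb = torch.cat((freqs, freqs), dim=-1)                                        # (1, seq_len, head_dim)
print(emb.cos().shape, emb.sin().shape)  # torch.Size([1, 6, 8]) twice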
|
class Zamba2RotaryEmbedding(nn.Module):
def __init__(self, config: Zamba2Config, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5
| 0
| 20
| 2
| 15
| 5
| 3
| 0.33
| 1
| 4
| 1
| 0
| 3
| 7
| 3
| 13
| 65
| 7
| 46
| 25
| 37
| 15
| 38
| 20
| 34
| 3
| 1
| 1
| 8
|
6,357
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py
|
transformers.models.zamba2.modular_zamba2.Zamba2Attention
|
from ..llama.modeling_llama import LlamaRotaryEmbedding, apply_rotary_pos_emb
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils.deprecation import deprecate_kwarg
from ..zamba.modeling_zamba import ZambaAttention, ZambaAttentionDecoderLayer, ZambaForCausalLM, ZambaForSequenceClassification, ZambaHybridDynamicCache, ZambaHybridLayer, ZambaMambaDecoderLayer, ZambaModel, ZambaRMSNorm, eager_attention_forward
import torch
from ...processing_utils import Unpack
from .configuration_zamba2 import Zamba2Config
from torch import nn
from typing import Callable, Optional, Union
class Zamba2Attention(ZambaAttention):
"""
Multi-headed attention from 'Attention Is All You Need' paper.
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, we replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
Finally, this attention layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this
layer is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the q, k, v projectors to increase
expressivity with a small memory overhead (see Fig. 2 of https://huggingface.co/papers/2411.15242).
"""
def __init__(self, config: Zamba2Config, layer_idx: Optional[int]=None, num_fwd_mem_blocks: Optional[int]=None, block_id: Optional[int]=None):
super().__init__(config, layer_idx)
self.num_fwd_mem_blocks = num_fwd_mem_blocks
self.layer_block_map = config.hybrid_layer_ids
self.block_id = block_id
if config.use_shared_attention_adapter:
self.linear_q_adapter_list = nn.ModuleList([])
self.linear_k_adapter_list = nn.ModuleList([])
self.linear_v_adapter_list = nn.ModuleList([])
for i in range(self.num_fwd_mem_blocks):
if i % config.num_mem_blocks == block_id:
linear_q_adapter = nn.Sequential(nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False), nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False))
linear_k_adapter = nn.Sequential(nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False), nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False))
linear_v_adapter = nn.Sequential(nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False), nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False))
else:
linear_q_adapter = nn.Identity()
linear_k_adapter = nn.Identity()
linear_v_adapter = nn.Identity()
self.linear_q_adapter_list.append(linear_q_adapter)
self.linear_k_adapter_list.append(linear_k_adapter)
self.linear_v_adapter_list.append(linear_v_adapter)
self.layer_dic = {value: index for index, value in enumerate(self.layer_block_map)}
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
if self.config.use_shared_attention_adapter:
adapter_layer_idx = self.layer_dic[layer_idx]
query_states = query_states + self.linear_q_adapter_list[adapter_layer_idx](hidden_states)
key_states = key_states + self.linear_k_adapter_list[adapter_layer_idx](hidden_states)
value_states = value_states + self.linear_v_adapter_list[adapter_layer_idx](hidden_states)
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
if self.config.use_mem_rope:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
key_states, value_states = past_key_values.update(key_states, value_states, layer_idx)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
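A hedged sketch of the un-tied adapter pattern used above: a rank-`adapter_rank` bottleneck of two bias-free Linear layers is added onto a shared projection, giving each tied block position its own low-rank correction. Dimensions are illustrative assumptions.
import torch
from torch import nn

hidden, rank = 16, 4
shared_proj = nn.Linear(hidden, hidden, bias=False)            # tied across blocks
adapter = nn.Sequential(nn.Linear(hidden, rank, bias=False),   # un-tied, LoRA-style
                        nn.Linear(rank, hidden, bias=False))
x = torch.randn(2, 5, hidden)
q = shared_proj(x) + adapter(x)   # mirrors query_states + linear_q_adapter_list[idx](hidden_states)
print(q.shape)  # torch.Size([2, 5, 16])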
|
class Zamba2Attention(ZambaAttention):
'''
Multi-headed attention from 'Attention Is All You Need' paper.
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, we replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
Finally, this attention layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this
layer is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the q, k, v projectors to increase
expressivity with a small memory overhead (see Fig. 2 of https://huggingface.co/papers/2411.15242).
'''
def __init__(self, config: Zamba2Config, layer_idx: Optional[int]=None, num_fwd_mem_blocks: Optional[int]=None, block_id: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| 4
| 1
| 48
| 5
| 43
| 0
| 6
| 0.15
| 1
| 8
| 3
| 0
| 2
| 9
| 2
| 14
| 113
| 13
| 87
| 39
| 70
| 13
| 50
| 23
| 47
| 7
| 2
| 3
| 11
|
6,358
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py
|
transformers.models.zamba2.modular_zamba2.Zamba2AttentionDecoderLayer
|
from ..zamba.modeling_zamba import ZambaAttention, ZambaAttentionDecoderLayer, ZambaForCausalLM, ZambaForSequenceClassification, ZambaHybridDynamicCache, ZambaHybridLayer, ZambaMambaDecoderLayer, ZambaModel, ZambaRMSNorm, eager_attention_forward
from typing import Callable, Optional, Union
from .configuration_zamba2 import Zamba2Config
from ...modeling_flash_attention_utils import FlashAttentionKwargs
import torch
from ...processing_utils import Unpack
from ...utils.deprecation import deprecate_kwarg
class Zamba2AttentionDecoderLayer(ZambaAttentionDecoderLayer):
def __init__(self, config: Zamba2Config, block_id: Optional[int]=None, layer_idx: Optional[int]=None):
self.block_id = block_id
num_gs = len(config.hybrid_layer_ids)
super().__init__(config, layer_idx)
self.self_attn = Zamba2Attention(config, layer_idx=-1, num_fwd_mem_blocks=num_gs, block_id=block_id)
self.feed_forward = Zamba2MLP(config, num_fwd_mem_blocks=num_gs, block_id=block_id)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, position_embeddings: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`.
This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The
concatenated tensor is then used as input of the pre-attention RMSNorm
(see fig. 2 in https://huggingface.co/papers/2405.16712).
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
"""
hidden_states = torch.concatenate([hidden_states, original_hidden_states], dim=-1)
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, layer_idx=layer_idx, attention_mask=attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.pre_ff_layernorm(hidden_states)
hidden_states = self.feed_forward(hidden_states, layer_idx)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
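As the Zamba2Attention docstring notes, the attention input here is the concatenation of the running hidden states with the original word embeddings, which is why the attention operates at 2 * hidden_size. A toy sketch (shapes assumed):
import torch

batch, seq, hidden = 2, 5, 16
hidden_states = torch.randn(batch, seq, hidden)            # output of previous mamba layer
original_hidden_states = torch.randn(batch, seq, hidden)   # word embedding output
attn_input = torch.concatenate([hidden_states, original_hidden_states], dim=-1)
print(attn_input.shape)  # torch.Size([2, 5, 32])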
|
class Zamba2AttentionDecoderLayer(ZambaAttentionDecoderLayer):
def __init__(self, config: Zamba2Config, block_id: Optional[int]=None, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, position_embeddings: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`.
This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The
concatenated tensor is then used as input of the pre-attention RMSNorm
(see fig. 2 in https://huggingface.co/papers/2405.16712).
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
'''
pass
| 4
| 1
| 29
| 2
| 17
| 10
| 2
| 0.57
| 1
| 9
| 5
| 0
| 2
| 3
| 2
| 14
| 60
| 5
| 35
| 19
| 22
| 20
| 17
| 9
| 14
| 2
| 2
| 1
| 3
|
6,359
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py
|
transformers.models.zamba2.modular_zamba2.Zamba2ForCausalLM
|
from ..zamba.modeling_zamba import ZambaAttention, ZambaAttentionDecoderLayer, ZambaForCausalLM, ZambaForSequenceClassification, ZambaHybridDynamicCache, ZambaHybridLayer, ZambaMambaDecoderLayer, ZambaModel, ZambaRMSNorm, eager_attention_forward
class Zamba2ForCausalLM(ZambaForCausalLM):
pass
|
class Zamba2ForCausalLM(ZambaForCausalLM):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
6,360
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py
|
transformers.models.zamba2.modular_zamba2.Zamba2ForSequenceClassification
|
from ..zamba.modeling_zamba import ZambaAttention, ZambaAttentionDecoderLayer, ZambaForCausalLM, ZambaForSequenceClassification, ZambaHybridDynamicCache, ZambaHybridLayer, ZambaMambaDecoderLayer, ZambaModel, ZambaRMSNorm, eager_attention_forward
class Zamba2ForSequenceClassification(ZambaForSequenceClassification):
pass
|
class Zamba2ForSequenceClassification(ZambaForSequenceClassification):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
6,361
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py
|
transformers.models.zamba2.modular_zamba2.Zamba2HybridDynamicCache
|
from ..zamba.modeling_zamba import ZambaAttention, ZambaAttentionDecoderLayer, ZambaForCausalLM, ZambaForSequenceClassification, ZambaHybridDynamicCache, ZambaHybridLayer, ZambaMambaDecoderLayer, ZambaModel, ZambaRMSNorm, eager_attention_forward
import torch
from .configuration_zamba2 import Zamba2Config
from typing import Callable, Optional, Union
class Zamba2HybridDynamicCache(ZambaHybridDynamicCache):
"""
A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
(which has a constant shape regardless of seq_len).
This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor depends on the layer type:
For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
"""
def __init__(self, config: Zamba2Config, batch_size: int, dtype: torch.dtype=torch.float16, device: Optional[str]=None):
self.dtype = dtype
self.layers_block_type = config.layers_block_type
self.has_previous_state = False
self.intermediate_size = int(config.mamba_expand * config.hidden_size)
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.n_mamba_heads = config.n_mamba_heads
self.transformer_layers = []
self._modules = {}
self._parameters = {}
self._buffers = {}
self.conv_states = {}
self.ssm_states = {}
for i in range(config.num_hidden_layers):
self.conv_states[i] = torch.zeros(batch_size, self.intermediate_size + 2 * config.mamba_ngroups * config.mamba_d_state, self.conv_kernel_size, device=device, dtype=dtype)
self.ssm_states[i] = torch.zeros(batch_size, self.n_mamba_heads, config.mamba_headdim, self.ssm_state_size, device=device, dtype=dtype)
if self.layers_block_type[i] == 'hybrid':
self.transformer_layers.append(i)
self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
def update_conv_state(self, layer_idx: int, new_conv_state: torch.Tensor, cache_position: torch.LongTensor) -> torch.Tensor:
conv_state = self.conv_states[layer_idx]
cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)
conv_state = conv_state.roll(shifts=-1, dims=-1)
conv_state[:, :, cache_position] = new_conv_state.to(conv_state.device)
self.conv_states[layer_idx].zero_()
self.conv_states[layer_idx] += conv_state
return self.conv_states[layer_idx]
def reset(self):
for layer_idx in self.conv_states:
    self.conv_states[layer_idx].zero_()
    self.ssm_states[layer_idx].zero_()
def get_seq_length(self, layer_idx: Optional[int]=0) -> int:
"""Returns the sequence length of the cached states. A layer index can be optionally passed."""
layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0:
return 0
return self.key_cache[layer_idx].shape[-2]
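A toy instantiation (all sizes are assumptions) of the per-layer cache shapes documented above: mamba layers hold fixed-size conv/ssm states, while attention layers start from empty (batch_size, 0) placeholders that grow along seq_len.
import torch

batch, conv_dim, d_conv, n_heads, headdim, d_state = 2, 8, 4, 4, 2, 16
conv_state = torch.zeros(batch, conv_dim, d_conv)           # constant shape per step
ssm_state = torch.zeros(batch, n_heads, headdim, d_state)   # constant shape per step
key_cache = torch.tensor([[]] * batch)                      # shape (2, 0) until first update
print(conv_state.shape, ssm_state.shape, key_cache.shape)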
| null | 5
| 2
| 13
| 0
| 12
| 1
| 2
| 0.26
| 1
| 5
| 1
| 0
| 4
| 15
| 4
| 41
| 69
| 6
| 50
| 26
| 41
| 13
| 38
| 22
| 33
| 3
| 4
| 2
| 8
|
6,362
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py
|
transformers.models.zamba2.modular_zamba2.Zamba2HybridLayer
|
from torch import nn
from typing import Callable, Optional, Union
from ..zamba.modeling_zamba import ZambaAttention, ZambaAttentionDecoderLayer, ZambaForCausalLM, ZambaForSequenceClassification, ZambaHybridDynamicCache, ZambaHybridLayer, ZambaMambaDecoderLayer, ZambaModel, ZambaRMSNorm, eager_attention_forward
import torch
from ...utils.deprecation import deprecate_kwarg
class Zamba2HybridLayer(ZambaHybridLayer):
def __init__(self, shared_transformer: Zamba2AttentionDecoderLayer, linear: nn.Linear, mamba: Zamba2MambaDecoderLayer):
super().__init__(shared_transformer, linear, mamba)
del self.shared_transf
self.shared_transformer = shared_transformer
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, position_embeddings: Optional[torch.LongTensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with
hidden activations to form the input of the shared transformer layer.
layer_idx (`int`): layer number.
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
"""
layer_outputs = self.shared_transformer(hidden_states, original_hidden_states=original_hidden_states, layer_idx=layer_idx, attention_mask=causal_mask, past_key_values=past_key_values, output_attentions=output_attentions, position_embeddings=position_embeddings)
transformer_hidden_states = layer_outputs[0]
if output_attentions:
self_attn_weights = layer_outputs[1]
transformer_hidden_states = self.linear(transformer_hidden_states)
layer_outputs = self.mamba_decoder(hidden_states, transformer_hidden_states=transformer_hidden_states, attention_mask=attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, position_embeddings=position_embeddings)
if output_attentions:
layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:]
return layer_outputs
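A small data-flow sketch for the hybrid layer above (toy sizes, names assumed): the shared transformer's output is passed through a per-layer, un-shared Linear before being handed to the mamba decoder as `transformer_hidden_states`.
import torch
from torch import nn

hidden = 16
linear = nn.Linear(hidden, hidden, bias=False)   # per-layer projection, not tied
transformer_out = torch.randn(2, 5, hidden)      # output of the shared transformer block
side_input = linear(transformer_out)             # consumed by the mamba decoder layer
print(side_input.shape)  # torch.Size([2, 5, 16])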
|
class Zamba2HybridLayer(ZambaHybridLayer):
def __init__(self, shared_transformer: Zamba2AttentionDecoderLayer, linear: nn.Linear, mamba: Zamba2MambaDecoderLayer):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, position_embeddings: Optional[torch.LongTensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with
hidden activations to form the input of the shared transformer layer.
layer_idx (`int`): layer number.
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
'''
pass
| 4
| 1
| 35
| 4
| 22
| 10
| 2
| 0.43
| 1
| 7
| 3
| 0
| 2
| 1
| 2
| 14
| 71
| 8
| 44
| 20
| 28
| 19
| 15
| 7
| 12
| 3
| 2
| 1
| 4
|
6,363
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py
|
transformers.models.zamba2.modular_zamba2.Zamba2MLP
|
import torch
from typing import Callable, Optional, Union
from ...activations import ACT2FN
from .configuration_zamba2 import Zamba2Config
from torch import nn
class Zamba2MLP(nn.Module):
def __init__(self, config: Zamba2Config, num_fwd_mem_blocks=None, block_id: Optional[int]=None):
"""
This MLP layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this layer
is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the up and gate projectors to increase expressivity with a small memory overhead.
"""
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.num_fwd_mem_blocks = num_fwd_mem_blocks
self.block_id = block_id
self.gate_up_proj = nn.Linear(self.hidden_size, 2 * self.intermediate_size, bias=config.add_bias_linear)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear)
self.act_fn = ACT2FN[config.hidden_act]
self.gate_up_proj_adapter_list = nn.ModuleList([])
for i in range(self.num_fwd_mem_blocks):
if i % config.num_mem_blocks == block_id:
gate_up_proj_adapter = nn.Sequential(nn.Linear(self.config.hidden_size, self.config.adapter_rank, bias=False), nn.Linear(self.config.adapter_rank, 2 * self.intermediate_size, bias=False))
else:
gate_up_proj_adapter = nn.Identity()
self.gate_up_proj_adapter_list.append(gate_up_proj_adapter)
layer_block_map = config.hybrid_layer_ids
self.layer_dic = {value: index for index, value in enumerate(layer_block_map)}
def forward(self, hidden_state, layer_idx=None):
gate_up_state = self.gate_up_proj(hidden_state)
layer_idx = self.layer_dic[layer_idx]
gate_up_state = gate_up_state + self.gate_up_proj_adapter_list[layer_idx](hidden_state)
gate_up_state = torch.chunk(gate_up_state, 2, dim=-1)
hidden_state = self.act_fn(gate_up_state[0]) * gate_up_state[1]
output = self.down_proj(hidden_state)
return output
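A minimal sketch of the fused gate/up projection above: one Linear emits 2 * intermediate_size features, chunked into gate and up halves, with act(gate) * up feeding the down projection. SiLU stands in for ACT2FN[config.hidden_act]; all sizes are assumptions.
import torch
from torch import nn

hidden, intermediate = 8, 16
gate_up_proj = nn.Linear(hidden, 2 * intermediate, bias=False)
down_proj = nn.Linear(intermediate, hidden, bias=False)
x = torch.randn(2, 5, hidden)
gate, up = torch.chunk(gate_up_proj(x), 2, dim=-1)
y = down_proj(nn.functional.silu(gate) * up)   # SiLU assumed as the activation
print(y.shape)  # torch.Size([2, 5, 8])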
|
class Zamba2MLP(nn.Module):
def __init__(self, config: Zamba2Config, num_fwd_mem_blocks=None, block_id: Optional[int]=None):
'''
This MLP layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this layer
is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the up and gate projectors to increase expressivity with a small memory overhead.
'''
pass
def forward(self, hidden_state, layer_idx=None):
pass
| 3
| 1
| 19
| 2
| 15
| 2
| 2
| 0.13
| 1
| 5
| 1
| 0
| 2
| 10
| 2
| 12
| 40
| 5
| 31
| 18
| 28
| 4
| 27
| 18
| 24
| 3
| 1
| 2
| 4
|
6,364
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py
|
transformers.models.zamba2.modular_zamba2.Zamba2MambaDecoderLayer
|
from ..zamba.modeling_zamba import ZambaAttention, ZambaAttentionDecoderLayer, ZambaForCausalLM, ZambaForSequenceClassification, ZambaHybridDynamicCache, ZambaHybridLayer, ZambaMambaDecoderLayer, ZambaModel, ZambaRMSNorm, eager_attention_forward
from .configuration_zamba2 import Zamba2Config
class Zamba2MambaDecoderLayer(ZambaMambaDecoderLayer):
def __init__(self, config: Zamba2Config, layer_idx: int):
super().__init__(config, layer_idx)
self.mamba = Zamba2MambaMixer(config=config, layer_idx=layer_idx)
self.input_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
class Zamba2MambaDecoderLayer(ZambaMambaDecoderLayer):
def __init__(self, config: Zamba2Config, layer_idx: int):
pass
| 2
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 5
| 3
| 0
| 1
| 2
| 1
| 13
| 5
| 0
| 5
| 4
| 3
| 0
| 5
| 4
| 3
| 1
| 2
| 0
| 1
|
6,365
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py
|
transformers.models.zamba2.modular_zamba2.Zamba2MambaMixer
|
from .configuration_zamba2 import Zamba2Config
from ..mamba2.modeling_mamba2 import pad_tensor_by_size, reshape_into_chunks, segment_sum
from torch import nn
from typing import Callable, Optional, Union
import torch
class Zamba2MambaMixer(nn.Module):
"""
Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
and is why Mamba is called **selective** state spaces)
"""
def __init__(self, config: Zamba2Config, layer_idx: Optional[int]=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = int(config.mamba_expand * self.hidden_size)
self.layer_idx = layer_idx
self.use_conv_bias = config.use_conv_bias
self.activation = 'silu'
self.act = nn.SiLU()
self.use_mem_eff_path = config.use_mem_eff_path
self.n_groups = config.mamba_ngroups
self.head_dim = config.mamba_headdim
self.num_heads = self.config.n_mamba_heads
self.chunk_size = config.chunk_size
self.time_step_limit = config.time_step_limit
self.time_step_min = config.time_step_min
self.time_step_max = config.time_step_max
self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
self.conv1d = nn.Conv1d(in_channels=self.conv_dim, out_channels=self.conv_dim, bias=True, kernel_size=config.mamba_d_conv, groups=self.conv_dim, padding=config.mamba_d_conv - 1)
projection_size = self.intermediate_size + self.conv_dim + self.num_heads
self.in_proj = nn.Linear(self.hidden_size, projection_size, bias=config.add_bias_linear)
self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
A = torch.arange(1, self.num_heads + 1)
self.A_log = nn.Parameter(torch.log(A))
self.norm = Zamba2RMSNormGated(self.intermediate_size, group_size=self.intermediate_size // self.n_groups, eps=1e-05)
self.D = nn.Parameter(torch.ones(self.num_heads))
self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear)
if not is_fast_path_available:
logger.warning_once('The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)` is None. Falling back to the naive implementation. To install, follow https://github.com/state-spaces/mamba/#installation and https://github.com/Dao-AILab/causal-conv1d')
def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
batch_size, seq_len, _ = hidden_states.shape
groups_time_state_size = self.n_groups * self.ssm_state_size
d_to_remove = 2 * self.intermediate_size + 2 * self.n_groups * self.ssm_state_size + self.num_heads
if cache_params is not None and cache_params.has_previous_state:
in_projected_states = self.in_proj(hidden_states.squeeze(1))
d_mlp = (in_projected_states.shape[-1] - d_to_remove) // 2
split_projection_dim = [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads]
_, _, gate, hidden_states_B_C, dt = torch.split(in_projected_states, split_projection_dim, dim=-1)
hidden_states_B_C = causal_conv1d_update(hidden_states_B_C, cache_params.conv_states[self.layer_idx], self.conv1d.weight.squeeze(1), self.conv1d.bias, self.activation)
hidden_states, B, C = torch.split(hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1)
A = -torch.exp(self.A_log.float())
A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
dt = dt[:, :, None].expand(-1, -1, self.head_dim)
dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
D = self.D[:, None, ...].expand(-1, self.head_dim)
B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
hidden_states = selective_state_update(cache_params.ssm_states[self.layer_idx], hidden_states_reshaped, dt, A, B, C, D, z=None, dt_bias=dt_bias, dt_softplus=True)
hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
hidden_states = self.norm(hidden_states, gate)
out = self.out_proj(hidden_states)[:, None, ...]
else:
if attention_mask is not None and (not torch.all(attention_mask == 1)):
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
projected_states = self.in_proj(hidden_states)
A = -torch.exp(self.A_log.float())
dt_limit_kwargs = {} if self.time_step_limit is None else {'dt_limit': self.time_step_limit}
if attention_mask is not None:
input_not_masked = torch.all(attention_mask == 1)
else:
input_not_masked = True
if self.use_mem_eff_path and self.training and (cache_params is None) and input_not_masked:
out, ssm_state = mamba_split_conv1d_scan_combined(projected_states, self.conv1d.weight.squeeze(1), self.conv1d.bias, self.dt_bias, A, D=self.D, chunk_size=self.chunk_size, seq_idx=None, activation=self.activation, rmsnorm_weight=self.norm.weight, rmsnorm_eps=self.norm.variance_epsilon, outproj_weight=self.out_proj.weight, outproj_bias=self.out_proj.bias, headdim=self.head_dim, ngroups=self.n_groups, norm_before_gate=False, return_final_states=True, **dt_limit_kwargs)
else:
gate, hidden_states_B_C, time_step = torch.split(projected_states, [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1)
if cache_params is not None:
hidden_states_B_C_t = hidden_states_B_C.transpose(1, 2)
conv_state = nn.functional.pad(hidden_states_B_C_t, (self.conv_kernel_size - hidden_states_B_C_t.shape[-1], 0))
cache_params.conv_states[self.layer_idx].copy_(conv_state)
if causal_conv1d_fn is None or self.activation not in ['silu', 'swish']:
hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2)).transpose(1, 2)[:, :seq_len])
else:
hidden_states_B_C = causal_conv1d_fn(x=hidden_states_B_C.transpose(1, 2), weight=self.conv1d.weight.squeeze(1), bias=self.conv1d.bias, activation=self.activation).transpose(1, 2)[:, :seq_len]
hidden_states, B, C = torch.split(hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1)
if attention_mask is not None and (not torch.all(attention_mask == 1)):
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
scan_output, ssm_state = mamba_chunk_scan_combined(hidden_states.view(batch_size, seq_len, -1, self.head_dim), time_step, A, B.view(batch_size, seq_len, self.n_groups, -1), C.view(batch_size, seq_len, self.n_groups, -1), chunk_size=self.chunk_size, D=self.D, z=None, seq_idx=None, return_final_states=True, dt_bias=self.dt_bias, dt_softplus=True, **dt_limit_kwargs)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
scan_output = scan_output.view(batch_size, seq_len, -1)
scan_output = self.norm(scan_output, gate)
out = self.out_proj(scan_output)
return out
def torch_forward(self, input_states, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
if cache_params is not None and cache_params.has_previous_state:
projected_states = self.in_proj(input_states.squeeze(1))
else:
if attention_mask is not None and (not torch.all(attention_mask == 1)):
input_states = (input_states * attention_mask[:, :, None]).to(dtype)
projected_states = self.in_proj(input_states)
d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size - self.num_heads) // 2
_, _, gate, hidden_states, dt = projected_states.split([d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1)
if cache_params is not None:
ssm_state = cache_params.ssm_states[self.layer_idx].clone()
ssm_state = ssm_state.to(hidden_states.device)
if cache_params.has_previous_state:
gate = gate.unsqueeze(1)
conv_state = cache_params.conv_states[self.layer_idx]
conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
conv_state[:, :, -1] = hidden_states[:, 0, :] if hidden_states.ndim == 3 else hidden_states
cache_params.conv_states[self.layer_idx].copy_(conv_state)
hidden_states = torch.sum(conv_state.to(projected_states.device) * self.conv1d.weight[:, 0, :], dim=-1)
if self.use_conv_bias:
hidden_states += self.conv1d.bias
hidden_states = self.act(hidden_states).to(dtype)[:, None, ...]
else:
hidden_states = hidden_states.transpose(1, 2)
conv_state = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
cache_params.conv_states[self.layer_idx].copy_(conv_state)
hidden_states = self.act(self.conv1d(hidden_states).transpose(1, 2))[:, :seq_len, :]
if attention_mask is not None and (not torch.all(attention_mask == 1)):
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
else:
ssm_state = torch.zeros((batch_size, self.num_heads, self.head_dim, self.ssm_state_size), device=hidden_states.device, dtype=dtype)
hidden_states = self.act(self.conv1d(hidden_states.transpose(1, 2))[..., :seq_len].transpose(1, 2))
hidden_states, B, C = torch.split(hidden_states, [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], dim=-1)
A = -torch.exp(self.A_log.float())
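# The cached branch below performs single-token decoding with the discretized recurrence
#   h_t = exp(dt * A) * h_{t-1} + (dt * B_t) * x_t,   y_t = C_t h_t + D * x_t,
# while the else-branch computes the same SSM over the whole sequence via the chunked scan.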
if cache_params is not None and cache_params.has_previous_state:
dt = dt[:, None, ...] if dt.ndim == 2 else dt[:, 0, :][:, None, ...]
dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
dt = torch.clamp(dt, self.time_step_min)
A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
dA = torch.exp(dt[..., None] * A)
B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
B = B.reshape(batch_size, -1, B.shape[-1])
dB = dt[..., None] * B[..., None, :]
hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
dBx = dB * hidden_states[..., None]
cache_params.ssm_states[self.layer_idx].copy_(cache_params.ssm_states[self.layer_idx] * dA + dBx)
C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
C = C.reshape(batch_size, -1, C.shape[-1])
ssm_states = cache_params.ssm_states[self.layer_idx].to(C.dtype)
ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size)
C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1)
y = torch.bmm(ssm_states_reshaped, C_reshaped)
y = y.view(batch_size, self.num_heads, self.head_dim)
D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
y = (y + hidden_states * D).to(y.dtype)
y = y.reshape(batch_size, -1)[:, None, ...]
else:
dt = nn.functional.softplus(dt + self.dt_bias)
dt = torch.clamp(dt, self.time_step_min)
hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
hidden_states = hidden_states * dt[..., None]
A = A.to(hidden_states.dtype) * dt
hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
A = A.permute(0, 3, 1, 2)
A_cumsum = torch.cumsum(A, dim=-1)
L = torch.exp(segment_sum(A))
G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :]
G = G_intermediate.sum(dim=-1)
M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
M = M_intermediate.sum(dim=-1)
Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(3)
decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum)
B_decay_contraction = B * decay_states.permute(0, 2, 3, 1)[..., None]
states = (B_decay_contraction.permute(0, 1, 3, 2, 4)[..., None] * hidden_states.permute(0, 1, 3, 2, 4)[..., None, :]).sum(dim=3).permute(0, 1, 2, 4, 3)
if cache_params is not None and cache_params.has_previous_state:
previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...]
else:
previous_states = torch.zeros_like(states[:, :1])
states = torch.cat([previous_states, states], dim=1)
decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
states_permuted = states.permute(0, 2, 1, 3, 4)
result = (decay_chunk[..., None, None] * states_permuted[:, :, None, ...]).sum(dim=2)
new_states = result.permute(0, 2, 1, 3, 4)
states, ssm_state = (new_states[:, :-1], new_states[:, -1])
state_decay_out = torch.exp(A_cumsum)
C_times_states = C[..., None, :] * states[:, :, None, ...]
state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
Y_off = C_times_states.sum(-1) * state_decay_out_permuted[..., None]
y = Y_diag + Y_off
y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
y = y + D_residual
if pad_size > 0:
y = y[:, :seq_len, :, :]
y = y.reshape(batch_size, seq_len, -1)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
scan_output = self.norm(y, gate)
contextualized_states = self.out_proj(scan_output.to(dtype))
return contextualized_states
def forward(self, hidden_states, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
if is_fast_path_available and 'cuda' in self.in_proj.weight.device.type:
return self.cuda_kernels_forward(hidden_states, cache_params, attention_mask)
return self.torch_forward(hidden_states, cache_params, attention_mask)
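For intuition, here is a minimal standalone sketch (hypothetical one-head shapes, not model code) of the per-token recurrence that the `has_previous_state` branch of `torch_forward` implements: the cached state decays by `exp(dt * A)`, accumulates `dt * B * x`, and the output contracts the state with `C` and adds the `D` skip term.
import torch
# Hypothetical sizes: one head with head_dim=4 and ssm_state_size=8.
head_dim, state_size = 4, 8
h = torch.zeros(head_dim, state_size)     # per-head SSM state kept in the cache
A = -torch.rand(())                       # A = -exp(A_log): a negative scalar per head
D = torch.ones(head_dim)                  # skip connection, initialized to 1.0 in _init_weights
for _ in range(3):                        # three decode steps
    x = torch.randn(head_dim)             # conv/activation output for the current token
    B = torch.randn(state_size)           # input-dependent input projection
    C = torch.randn(state_size)           # input-dependent output projection
    dt = torch.nn.functional.softplus(torch.randn(()))           # discretized step size
    h = torch.exp(dt * A) * h + (dt * B)[None, :] * x[:, None]   # h <- dA * h + dB * x
    y = h @ C + D * x                     # contract state with C and add the D skip term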
|
class Zamba2MambaMixer(nn.Module):
'''
Compute ∆, A, B, C, and D, the state space parameters, and compute the `contextualized_states`.
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
and is why Mamba is called **selective** state spaces)
'''
def __init__(self, config: Zamba2Config, layer_idx: Optional[int]=None):
pass
def cuda_kernels_forward(self, hidden_states: torch.Tensor, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
pass
def torch_forward(self, input_states, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
pass
def forward(self, hidden_states, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
pass
| total_program_units: 5 | total_doc_str: 1 | AvgCountLine: 104 | AvgCountLineBlank: 11 | AvgCountLineCode: 79 | AvgCountLineComment: 18 | AvgCyclomatic: 7 | CommentToCodeRatio: 0.25 | CountClassBase: 1 | CountClassCoupled: 6 | CountClassCoupledModified: 3 | CountClassDerived: 0 | CountDeclInstanceMethod: 4 | CountDeclInstanceVariable: 25 | CountDeclMethod: 4 | CountDeclMethodAll: 14 | CountLine: 428 | CountLineBlank: 49 | CountLineCode: 315 | CountLineCodeDecl: 105 | CountLineCodeExe: 300 | CountLineComment: 80 | CountStmt: 198 | CountStmtDecl: 95 | CountStmtExe: 193 | MaxCyclomatic: 13 | MaxInheritanceTree: 1 | MaxNesting: 3 | SumCyclomatic: 27
|
6366 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py | transformers.models.zamba2.modular_zamba2.Zamba2Model
|
from ...modeling_outputs import BaseModelOutputWithPast
from typing import Callable, Optional, Union
import re
import torch
from .configuration_zamba2 import Zamba2Config
from torch import nn
from itertools import cycle
from ..zamba.modeling_zamba import ZambaAttention, ZambaAttentionDecoderLayer, ZambaForCausalLM, ZambaForSequenceClassification, ZambaHybridDynamicCache, ZambaHybridLayer, ZambaMambaDecoderLayer, ZambaModel, ZambaRMSNorm, eager_attention_forward
class Zamba2Model(ZambaModel, Zamba2PreTrainedModel):
"""
Model consisting of *config.num_hidden_layers* layers.
Args:
config: Zamba2Config
"""
def __init__(self, config: Zamba2Config):
Zamba2PreTrainedModel.__init__(self, config)
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
blocks = [Zamba2AttentionDecoderLayer(config, block_id=k) for k in range(config.num_mem_blocks)]
mamba_layers = []
linear_layers = []
self.layers_block_type = config.layers_block_type
for i in range(config.num_hidden_layers):
if config.layers_block_type[i] == 'mamba':
mamba_layers.append(Zamba2MambaDecoderLayer(config, layer_idx=i))
elif config.layers_block_type[i] == 'hybrid':
linear_layers.append(nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False))
mamba_layers.append(Zamba2MambaDecoderLayer(config, layer_idx=i))
mamba_layers = iter(mamba_layers)
linear_layers = iter(linear_layers)
blocks = cycle(blocks)
layers = self.get_layers(blocks, linear_layers, mamba_layers)
self.layers = nn.ModuleList(layers)
self._attn_implementation = config._attn_implementation
self.final_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
if config.use_mem_rope:
if config.use_long_context:
logger.warning_once('`use_long_context` set to `True`: using rescaled `rope_theta` and extended `max_position_embeddings`.')
self.rotary_emb = Zamba2RotaryEmbedding(config)
self.gradient_checkpointing = False
self.post_init()
def get_layers(self, blocks, linear_layers, mamba_layers):
layers = []
self._tied_weights_keys = []
self.first_transformer_layer_id = 0
for layer_id, layer_type in enumerate(self.layers_block_type):
if layer_type == 'hybrid':
if self.first_transformer_layer_id == 0:
self.first_transformer_layer_id = layer_id
block = next(blocks)
if self.config.num_mem_blocks * len(self.config.hybrid_layer_ids) > 1:
prefix_pattern = f'^layers\\.{layer_id}\\.shared_transformer\\.'
main_keys_pattern = re.compile(prefix_pattern + '(?:' + 'self_attn\\.(?:q_proj|k_proj|v_proj|o_proj)\\.weight|' + 'feed_forward\\.(?:gate_up_proj|down_proj)\\.weight|' + '(?:input_layernorm|pre_ff_layernorm)\\.weight' + ')$')
self._tied_weights_keys.append(main_keys_pattern)
adapter_id = 0
for _layer_type in self.layers_block_type:
if _layer_type == 'hybrid' and adapter_id % self.config.num_mem_blocks == block.block_id:
adapter_pattern = re.compile('^shared_transformer\\.feed_forward\\.gate_up_proj_adapter_list\\.' + str(adapter_id) + '\\.(?:0|1)\\.weight$')
self._tied_weights_keys.append(adapter_pattern)
adapter_id += 1
if self.config.use_shared_attention_adapter:
adapter_id = 0
for _layer_type in self.layers_block_type:
if _layer_type == 'hybrid' and adapter_id % self.config.num_mem_blocks == block.block_id:
attn_adapter_pattern = re.compile('^shared_transformer\\.self_attn\\.' + '(?:linear_q_adapter_list|linear_k_adapter_list|linear_v_adapter_list)\\.' + str(adapter_id) + '\\.(?:0|1)\\.weight$')
self._tied_weights_keys.append(attn_adapter_pattern)
adapter_id += 1
layers.append(Zamba2HybridLayer(block, next(linear_layers), next(mamba_layers)))
else:
layers.append(next(mamba_layers))
return layers
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one')
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.')
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = inputs_embeds
original_hidden_states = torch.clone(inputs_embeds)
if use_cache and past_key_values is None:
batch_size = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
past_key_values = Zamba2HybridDynamicCache(self.config, batch_size, dtype=self.dtype, device=self.device)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length(layer_idx=self.first_transformer_layer_id) if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
if self.config.use_mem_rope:
position_embeddings = self.rotary_emb(hidden_states, position_ids)
else:
position_embeddings = None
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for layer_idx, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(layer.__call__, hidden_states, original_hidden_states, layer_idx, attention_mask, causal_mask, past_key_values, output_attentions, use_cache, position_embeddings)
else:
layer_outputs = layer(hidden_states, original_hidden_states=original_hidden_states, layer_idx=layer_idx, attention_mask=attention_mask, causal_mask=causal_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, position_embeddings=position_embeddings)
hidden_states = layer_outputs[0]
if output_attentions:
if layer_outputs[1] is not None:
all_self_attns += (layer_outputs[1],)
hidden_states = self.final_layernorm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if past_key_values is not None and (not past_key_values.has_previous_state):
past_key_values.has_previous_state = True
output = BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns)
return output if return_dict else output.to_tuple()
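A toy sketch of the layer interleaving performed by `get_layers` above, using strings in place of modules and an invented layer pattern: hybrid positions draw the next shared transformer block from the `cycle`, so the `num_mem_blocks` blocks are reused round-robin across all hybrid layers.
from itertools import cycle
layers_block_type = ['mamba', 'hybrid', 'mamba', 'hybrid', 'mamba', 'hybrid']  # made-up pattern
blocks = cycle(['block_0', 'block_1'])  # num_mem_blocks = 2, cycled across hybrid layers
layers = [f'hybrid({next(blocks)})' if t == 'hybrid' else 'mamba' for t in layers_block_type]
print(layers)  # ['mamba', 'hybrid(block_0)', 'mamba', 'hybrid(block_1)', 'mamba', 'hybrid(block_0)']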
|
class Zamba2Model(ZambaModel, Zamba2PreTrainedModel):
'''
Model consisting of *config.num_hidden_layers* layers.
Args:
config: Zamba2Config
'''
def __init__(self, config: Zamba2Config):
pass
def get_layers(self, blocks, linear_layers, mamba_layers):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Zamba2HybridDynamicCache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPast]:
pass
| total_program_units: 4 | total_doc_str: 1 | AvgCountLine: 67 | AvgCountLineBlank: 7 | AvgCountLineCode: 58 | AvgCountLineComment: 2 | AvgCyclomatic: 14 | CommentToCodeRatio: 0.06 | CountClassBase: 2 | CountClassCoupled: 15 | CountClassCoupledModified: 8 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 13 | CountDeclMethod: 3 | CountDeclMethodAll: 140 | CountLine: 211 | CountLineBlank: 26 | CountLineCode: 175 | CountLineCodeDecl: 54 | CountLineCodeExe: 159 | CountLineComment: 10 | CountStmt: 103 | CountStmtDecl: 41 | CountStmtExe: 99 | MaxCyclomatic: 25 | MaxInheritanceTree: 3 | MaxNesting: 6 | SumCyclomatic: 41
|
6367 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py | transformers.models.zamba2.modular_zamba2.Zamba2PreTrainedModel
|
import torch
import math
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_zamba2 import Zamba2Config
class Zamba2PreTrainedModel(PreTrainedModel):
config: Zamba2Config
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['Zamba2AttentionDecoderLayer', 'Zamba2MambaDecoderLayer']
_skip_keys_device_placement = 'past_key_values'
_supports_flash_attn = True
_supports_flex_attn = True
_supports_sdpa = True
_is_stateful = True
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Zamba2MambaMixer):
dt = torch.exp(torch.rand(self.config.n_mamba_heads) * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min)) + math.log(self.config.time_step_min)).clamp(min=self.config.time_step_floor)
inv_dt = dt + torch.log(-torch.expm1(-dt))
module.dt_bias.data.copy_(inv_dt)
A = torch.arange(1, module.num_heads + 1)
module.A_log.data.copy_(torch.log(A))
module.D.data.fill_(1.0)
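A quick numerical check (not part of the model) that the `inv_dt` expression in `_init_weights` is the inverse of softplus, so `softplus(dt_bias)` recovers the sampled timestep:
import torch
dt = torch.tensor([0.001, 0.01, 0.1])       # sampled timesteps
inv_dt = dt + torch.log(-torch.expm1(-dt))  # inverse softplus, as used above
print(torch.nn.functional.softplus(inv_dt)) # tensor([0.0010, 0.0100, 0.1000])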
|
class Zamba2PreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| total_program_units: 2 | total_doc_str: 0 | AvgCountLine: 25 | AvgCountLineBlank: 2 | AvgCountLineCode: 22 | AvgCountLineComment: 1 | AvgCyclomatic: 6 | CommentToCodeRatio: 0.06 | CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 1 | CountClassDerived: 1 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 0 | CountDeclMethod: 1 | CountDeclMethodAll: 130 | CountLine: 37 | CountLineBlank: 3 | CountLineCode: 33 | CountLineCodeDecl: 15 | CountLineCodeExe: 31 | CountLineComment: 2 | CountStmt: 27 | CountStmtDecl: 15 | CountStmtExe: 25 | MaxCyclomatic: 6 | MaxInheritanceTree: 2 | MaxNesting: 2 | SumCyclomatic: 6
|
6368 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py | transformers.models.zamba2.modular_zamba2.Zamba2RMSNorm
|
from ..zamba.modeling_zamba import ZambaAttention, ZambaAttentionDecoderLayer, ZambaForCausalLM, ZambaForSequenceClassification, ZambaHybridDynamicCache, ZambaHybridLayer, ZambaMambaDecoderLayer, ZambaModel, ZambaRMSNorm, eager_attention_forward
class Zamba2RMSNorm(ZambaRMSNorm):
pass
|
class Zamba2RMSNorm(ZambaRMSNorm):
pass
| total_program_units: 1 | total_doc_str: 0 | AvgCountLine: 0 | AvgCountLineBlank: 0 | AvgCountLineCode: 0 | AvgCountLineComment: 0 | AvgCyclomatic: 0 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 0 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 0 | CountDeclInstanceVariable: 0 | CountDeclMethod: 0 | CountDeclMethodAll: 13 | CountLine: 2 | CountLineBlank: 0 | CountLineCode: 2 | CountLineCodeDecl: 1 | CountLineCodeExe: 1 | CountLineComment: 0 | CountStmt: 2 | CountStmtDecl: 1 | CountStmtExe: 1 | MaxCyclomatic: 0 | MaxInheritanceTree: 2 | MaxNesting: 0 | SumCyclomatic: 0
|
6369 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py | transformers.models.zamba2.modular_zamba2.Zamba2RMSNormGated
|
from torch import nn
import torch
class Zamba2RMSNormGated(torch.nn.Module):
def __init__(self, hidden_size, group_size, eps=1e-06):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
self.group_size = group_size
def forward(self, hidden_states, gate=None):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
if gate is not None:
hidden_states = hidden_states * nn.functional.silu(gate.to(torch.float32))
*prefix_dims, last_dim = hidden_states.shape
group_count = last_dim // self.group_size
hidden_states_group = hidden_states.view(*prefix_dims, group_count, self.group_size)
variance = hidden_states_group.pow(2).mean(-1, keepdim=True)
hidden_states_group = hidden_states_group * torch.rsqrt(variance + self.variance_epsilon)
hidden_states = hidden_states_group.view(*prefix_dims, group_count * self.group_size)
return self.weight * hidden_states.to(input_dtype)
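A shape check for the gated, grouped RMSNorm above; the sizes are illustrative only. Each `group_size` slice of the hidden dimension is normalized by its own RMS after the SiLU gate is applied:
import torch
norm = Zamba2RMSNormGated(hidden_size=64, group_size=16)  # 4 independent groups of 16 channels
x = torch.randn(2, 5, 64)                                 # (batch, seq_len, hidden_size)
gate = torch.randn(2, 5, 64)                              # silu(gate) scales x before normalization
print(norm(x, gate).shape)                                # torch.Size([2, 5, 64])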
|
class Zamba2RMSNormGated(torch.nn.Module):
def __init__(self, hidden_size, group_size, eps=1e-06):
pass
def forward(self, hidden_states, gate=None):
pass
| total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 9 | AvgCountLineBlank: 0 | AvgCountLineCode: 9 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 19 | CountLineBlank: 1 | CountLineCode: 18 | CountLineCodeDecl: 11 | CountLineCodeExe: 15 | CountLineComment: 0 | CountStmt: 18 | CountStmtDecl: 11 | CountStmtExe: 15 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 3
|
6370 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zamba2/modular_zamba2.py | transformers.models.zamba2.modular_zamba2.Zamba2RotaryEmbedding
|
from ..llama.modeling_llama import LlamaRotaryEmbedding, apply_rotary_pos_emb
class Zamba2RotaryEmbedding(LlamaRotaryEmbedding):
pass
|
class Zamba2RotaryEmbedding(LlamaRotaryEmbedding):
pass
| total_program_units: 1 | total_doc_str: 0 | AvgCountLine: 10 | AvgCountLineBlank: 0 | AvgCountLineCode: 9 | AvgCountLineComment: 1 | AvgCyclomatic: 1 | CommentToCodeRatio: 0.1 | CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 1 | CountDeclMethod: 1 | CountDeclMethodAll: 14 | CountLine: 11 | CountLineBlank: 0 | CountLineCode: 10 | CountLineCodeDecl: 7 | CountLineCodeExe: 4 | CountLineComment: 1 | CountStmt: 4 | CountStmtDecl: 3 | CountStmtExe: 2 | MaxCyclomatic: 1 | MaxInheritanceTree: 2 | MaxNesting: 0 | SumCyclomatic: 1
|
6371 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/configuration_zoedepth.py | transformers.models.zoedepth.configuration_zoedepth.ZoeDepthConfig
|
from ...configuration_utils import PretrainedConfig
from ..auto.configuration_auto import CONFIG_MAPPING
class ZoeDepthConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`ZoeDepthForDepthEstimation`]. It is used to instantiate a ZoeDepth
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ZoeDepth
[Intel/zoedepth-nyu](https://huggingface.co/Intel/zoedepth-nyu) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
backbone_config (`Union[dict[str, Any], PretrainedConfig]`, *optional*, defaults to `BeitConfig()`):
The configuration of the backbone model.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the batch normalization layers.
readout_type (`str`, *optional*, defaults to `"project"`):
The readout type to use when processing the readout token (CLS token) of the intermediate hidden states of
the ViT backbone. Can be one of [`"ignore"`, `"add"`, `"project"`].
- "ignore" simply ignores the CLS token.
- "add" passes the information from the CLS token to all other tokens by adding the representations.
- "project" passes information to the other tokens by concatenating the readout to all other tokens before
projecting the
representation to the original feature dimension D using a linear layer followed by a GELU non-linearity.
reassemble_factors (`list[int]`, *optional*, defaults to `[4, 2, 1, 0.5]`):
The up/downsampling factors of the reassemble layers.
neck_hidden_sizes (`list[str]`, *optional*, defaults to `[96, 192, 384, 768]`):
The hidden sizes to project to for the feature maps of the backbone.
fusion_hidden_size (`int`, *optional*, defaults to 256):
The number of channels before fusion.
head_in_index (`int`, *optional*, defaults to -1):
The index of the features to use in the heads.
use_batch_norm_in_fusion_residual (`bool`, *optional*, defaults to `False`):
Whether to use batch normalization in the pre-activate residual units of the fusion blocks.
use_bias_in_fusion_residual (`bool`, *optional*, defaults to `True`):
Whether to use bias in the pre-activate residual units of the fusion blocks.
num_relative_features (`int`, *optional*, defaults to 32):
The number of features to use in the relative depth estimation head.
add_projection (`bool`, *optional*, defaults to `False`):
Whether to add a projection layer before the depth estimation head.
bottleneck_features (`int`, *optional*, defaults to 256):
The number of features in the bottleneck layer.
num_attractors (`list[int]`, *optional*, defaults to `[16, 8, 4, 1]`):
The number of attractors to use in each stage.
bin_embedding_dim (`int`, *optional*, defaults to 128):
The dimension of the bin embeddings.
attractor_alpha (`int`, *optional*, defaults to 1000):
The alpha value to use in the attractor.
attractor_gamma (`int`, *optional*, defaults to 2):
The gamma value to use in the attractor.
attractor_kind (`str`, *optional*, defaults to `"mean"`):
The kind of attractor to use. Can be one of [`"mean"`, `"sum"`].
min_temp (`float`, *optional*, defaults to 0.0212):
The minimum temperature value to consider.
max_temp (`float`, *optional*, defaults to 50.0):
The maximum temperature value to consider.
bin_centers_type (`str`, *optional*, defaults to `"softplus"`):
Activation type used for bin centers. Can be "normed" or "softplus". For "normed" bin centers, a linear normalization trick
is applied, which results in bounded bin centers. For "softplus", the softplus activation is used and the bin centers are thus unbounded.
bin_configurations (`list[dict]`, *optional*, defaults to `[{'n_bins': 64, 'min_depth': 0.001, 'max_depth': 10.0}]`):
Configuration for each of the bin heads.
Each configuration should consist of the following keys:
- name (`str`): The name of the bin head - only required in case of multiple bin configurations.
- `n_bins` (`int`): The number of bins to use.
- `min_depth` (`float`): The minimum depth value to consider.
- `max_depth` (`float`): The maximum depth value to consider.
In case only a single configuration is passed, the model will use a single head with the specified configuration.
In case multiple configurations are passed, the model will use multiple heads with the specified configurations.
num_patch_transformer_layers (`int`, *optional*):
The number of transformer layers to use in the patch transformer. Only used in case of multiple bin configurations.
patch_transformer_hidden_size (`int`, *optional*):
The hidden size to use in the patch transformer. Only used in case of multiple bin configurations.
patch_transformer_intermediate_size (`int`, *optional*):
The intermediate size to use in the patch transformer. Only used in case of multiple bin configurations.
patch_transformer_num_attention_heads (`int`, *optional*):
The number of attention heads to use in the patch transformer. Only used in case of multiple bin configurations.
Example:
```python
>>> from transformers import ZoeDepthConfig, ZoeDepthForDepthEstimation
>>> # Initializing a ZoeDepth zoedepth-large style configuration
>>> configuration = ZoeDepthConfig()
>>> # Initializing a model from the zoedepth-large style configuration
>>> model = ZoeDepthForDepthEstimation(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'zoedepth'
def __init__(self, backbone_config=None, backbone=None, use_pretrained_backbone=False, backbone_kwargs=None, hidden_act='gelu', initializer_range=0.02, batch_norm_eps=1e-05, readout_type='project', reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_bias_in_fusion_residual=None, num_relative_features=32, add_projection=False, bottleneck_features=256, num_attractors=[16, 8, 4, 1], bin_embedding_dim=128, attractor_alpha=1000, attractor_gamma=2, attractor_kind='mean', min_temp=0.0212, max_temp=50.0, bin_centers_type='softplus', bin_configurations=[{'n_bins': 64, 'min_depth': 0.001, 'max_depth': 10.0}], num_patch_transformer_layers=None, patch_transformer_hidden_size=None, patch_transformer_intermediate_size=None, patch_transformer_num_attention_heads=None, **kwargs):
super().__init__(**kwargs)
if readout_type not in ['ignore', 'add', 'project']:
raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
if attractor_kind not in ['mean', 'sum']:
raise ValueError("Attractor_kind must be one of ['mean', 'sum']")
if use_pretrained_backbone:
raise ValueError('Pretrained backbones are not supported yet.')
if backbone_config is not None and backbone is not None:
raise ValueError("You can't specify both `backbone` and `backbone_config`.")
if backbone_config is None and backbone is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `BEiT` backbone.')
backbone_config = CONFIG_MAPPING['beit'](image_size=384, num_hidden_layers=24, hidden_size=1024, intermediate_size=4096, num_attention_heads=16, use_relative_position_bias=True, reshape_hidden_states=False, out_features=['stage6', 'stage12', 'stage18', 'stage24'])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get('model_type')
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
if backbone_kwargs is not None and backbone_kwargs and (backbone_config is not None):
raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
self.backbone_config = backbone_config
self.backbone = backbone
self.hidden_act = hidden_act
self.use_pretrained_backbone = use_pretrained_backbone
self.initializer_range = initializer_range
self.batch_norm_eps = batch_norm_eps
self.readout_type = readout_type
self.reassemble_factors = reassemble_factors
self.neck_hidden_sizes = neck_hidden_sizes
self.fusion_hidden_size = fusion_hidden_size
self.head_in_index = head_in_index
self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
self.use_bias_in_fusion_residual = use_bias_in_fusion_residual
self.num_relative_features = num_relative_features
self.add_projection = add_projection
self.bottleneck_features = bottleneck_features
self.num_attractors = num_attractors
self.bin_embedding_dim = bin_embedding_dim
self.attractor_alpha = attractor_alpha
self.attractor_gamma = attractor_gamma
self.attractor_kind = attractor_kind
self.min_temp = min_temp
self.max_temp = max_temp
self.bin_centers_type = bin_centers_type
self.bin_configurations = bin_configurations
self.num_patch_transformer_layers = num_patch_transformer_layers
self.patch_transformer_hidden_size = patch_transformer_hidden_size
self.patch_transformer_intermediate_size = patch_transformer_intermediate_size
self.patch_transformer_num_attention_heads = patch_transformer_num_attention_heads
@property
def sub_configs(self):
return {'backbone_config': type(self.backbone_config)} if getattr(self, 'backbone_config', None) is not None else {}
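A small example of the dict branch in `__init__` above: passing `backbone_config` as a plain dict with a `model_type` key rebuilds the matching config class through `CONFIG_MAPPING`. The values below are illustrative, not taken from a released checkpoint.
from transformers import ZoeDepthConfig
config = ZoeDepthConfig(backbone_config={'model_type': 'beit', 'image_size': 384})
print(type(config.backbone_config).__name__)  # BeitConfig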
| null
| total_program_units: 4 | total_doc_str: 1 | AvgCountLine: 98 | AvgCountLineBlank: 8 | AvgCountLineCode: 90 | AvgCountLineComment: 0 | AvgCyclomatic: 8 | CommentToCodeRatio: 1.04 | CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 29 | CountDeclMethod: 1 | CountDeclMethodAll: 1 | CountLine: 206 | CountLineBlank: 18 | CountLineCode: 92 | CountLineCodeDecl: 67 | CountLineCodeExe: 57 | CountLineComment: 96 | CountStmt: 49 | CountStmtDecl: 34 | CountStmtExe: 47 | MaxCyclomatic: 8 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 8
|
6372 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/image_processing_zoedepth.py | transformers.models.zoedepth.image_processing_zoedepth.ZoeDepthImageProcessor
|
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...image_transforms import PaddingMode, pad, to_channel_dimension_format
from typing import TYPE_CHECKING, Optional, Union
import numpy as np
from ...utils import TensorType, filter_out_non_signature_kwargs, is_torch_available, is_vision_available, logging, requires_backends
class ZoeDepthImageProcessor(BaseImageProcessor):
"""
Constructs a ZoeDepth image processor.
Args:
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the input.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
`preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in `preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions. Can be overridden by `do_resize` in `preprocess`.
size (`dict[str, int]`, *optional*, defaults to `{"height": 384, "width": 512}`):
Size of the image after resizing. If `keep_aspect_ratio` is `True`,
the image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions.
If `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value.
Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Defines the resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
keep_aspect_ratio (`bool`, *optional*, defaults to `True`):
If `True`, the image is resized by choosing the smaller of the height and width scaling factors and using it
for both dimensions. This ensures that the image is scaled down as little as possible while still fitting
within the desired output size. In case `ensure_multiple_of` is also set, the image is further resized to a
size that is a multiple of this value by flooring the height and width to the nearest multiple of this value.
Can be overridden by `keep_aspect_ratio` in `preprocess`.
ensure_multiple_of (`int`, *optional*, defaults to 32):
If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by flooring
the height and width to the nearest multiple of this value.
Works both with and without `keep_aspect_ratio` being set to `True`. Can be overridden by `ensure_multiple_of`
in `preprocess`.
"""
model_input_names = ['pixel_values']
def __init__(self, do_pad: bool=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, keep_aspect_ratio: bool=True, ensure_multiple_of: int=32, **kwargs) -> None:
super().__init__(**kwargs)
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
size = size if size is not None else {'height': 384, 'width': 512}
size = get_size_dict(size)
self.do_resize = do_resize
self.size = size
self.keep_aspect_ratio = keep_aspect_ratio
self.ensure_multiple_of = ensure_multiple_of
self.resample = resample
def resize(self, image: np.ndarray, size: dict[str, int], keep_aspect_ratio: bool=False, ensure_multiple_of: int=1, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image
is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is
set, the image is resized to a size that is a multiple of this value.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Target size of the output image.
keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.
ensure_multiple_of (`int`, *optional*, defaults to 1):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Defines the resampling filter to use if resizing the image. Otherwise, the image is resized to size
specified in `size`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
data_format = data_format if data_format is not None else input_data_format
size = get_size_dict(size)
if 'height' not in size or 'width' not in size:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
output_size = get_resize_output_image_size(image, output_size=(size['height'], size['width']), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, input_data_format=input_data_format)
height, width = output_size
requires_backends(self, 'torch')
torch_image = torch.from_numpy(image).unsqueeze(0)
torch_image = torch_image.permute(0, 3, 1, 2) if input_data_format == 'channels_last' else torch_image
resample_to_mode = {PILImageResampling.BILINEAR: 'bilinear', PILImageResampling.BICUBIC: 'bicubic'}
mode = resample_to_mode[resample]
resized_image = nn.functional.interpolate(torch_image, (int(height), int(width)), mode=mode, align_corners=True)
resized_image = resized_image.squeeze().numpy()
resized_image = to_channel_dimension_format(resized_image, data_format, input_channel_dim=ChannelDimension.FIRST)
return resized_image
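# Worked example of the sizing above, assuming the helper picks the smaller of the two
# scaling factors as the class docstring states: a 500x600 input resized toward
# {"height": 384, "width": 512} with keep_aspect_ratio=True and ensure_multiple_of=32
# uses scale = min(384/500, 512/600) = 0.768, giving 384x460.8, floored to 384x448.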
def pad_image(self, image: np.ndarray, mode: PaddingMode=PaddingMode.REFLECT, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Pad an image as done in the original ZoeDepth implementation.
Padding fixes the boundary artifacts in the output depth map.
Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset
which has a black or white border around the image. This function pads the input image and crops
the prediction back to the original size / view.
Args:
image (`np.ndarray`):
Image to pad.
mode (`PaddingMode`):
The padding mode to use. Can be one of:
- `"constant"`: pads with a constant value.
- `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
vector along each axis.
- `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
- `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
height, width = get_image_size(image, input_data_format)
pad_height = int(np.sqrt(height / 2) * 3)
pad_width = int(np.sqrt(width / 2) * 3)
return pad(image, padding=((pad_height, pad_height), (pad_width, pad_width)), mode=mode, data_format=data_format, input_data_format=input_data_format)
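# Worked example of the padding rule above: for a 480x640 image,
# pad_height = int(sqrt(480 / 2) * 3) = 46 and pad_width = int(sqrt(640 / 2) * 3) = 53,
# so the reflect-padded image is (480 + 2*46) x (640 + 2*53) = 572x746 before resizing.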
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_pad: Optional[bool]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_resize: Optional[bool]=None, size: Optional[int]=None, keep_aspect_ratio: Optional[bool]=None, ensure_multiple_of: Optional[int]=None, resample: Optional[PILImageResampling]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the input image.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. If `keep_aspect_ratio` is `True`, the image is resized by choosing the
smaller of the height and width scaling factors and using it for both dimensions. If `ensure_multiple_of`
is also set, the image is further resized to a size that is a multiple of this value.
keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`):
If `True` and `do_resize=True`, the image is resized by choosing the smaller of the height and width
scaling factors and using it for both dimensions. This ensures that the image is scaled down as little
as possible while still fitting within the desired output size. In case `ensure_multiple_of` is also
set, the image is further resized to a size that is a multiple of this value by flooring the height and
width to the nearest multiple of this value.
ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`):
If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by
flooring the height and width to the nearest multiple of this value.
Works both with and without `keep_aspect_ratio` being set to `True`. Can be overridden by
`ensure_multiple_of` in `preprocess`.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_pad = do_pad if do_pad is not None else self.do_pad
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
if do_pad:
images = [self.pad_image(image=image, input_data_format=input_data_format) for image in images]
if do_resize:
images = [self.resize(image=image, size=size, resample=resample, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=ensure_multiple_of, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_depth_estimation(self, outputs: 'ZoeDepthDepthEstimatorOutput', source_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]]=None, target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]]=None, outputs_flipped: Optional[Union['ZoeDepthDepthEstimatorOutput', None]]=None, do_remove_padding: Optional[Union[bool, None]]=None) -> list[dict[str, TensorType]]:
"""
Converts the raw output of [`ZoeDepthDepthEstimatorOutput`] into final depth predictions and depth PIL images.
Only supports PyTorch.
Args:
outputs ([`ZoeDepthDepthEstimatorOutput`]):
Raw outputs of the model.
source_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the source size
(height, width) of each image in the batch before preprocessing. This argument should be dealt as
"required" unless the user passes `do_remove_padding=False` as input to this function.
target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
outputs_flipped ([`ZoeDepthDepthEstimatorOutput`], *optional*):
Raw outputs of the model from flipped input (averaged out in the end).
do_remove_padding (`bool`, *optional*):
By default ZoeDepth adds padding equal to `int(√(height / 2) * 3)` (and similarly for width) to fix the
boundary artifacts in the output depth map, so we need to remove this padding during post-processing. The
parameter exists here in case the user changed the image preprocessing to not include padding.
Returns:
`list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
predictions.
"""
requires_backends(self, 'torch')
predicted_depth = outputs.predicted_depth
if outputs_flipped is not None and predicted_depth.shape != outputs_flipped.predicted_depth.shape:
raise ValueError('Make sure that `outputs` and `outputs_flipped` have the same shape')
if target_sizes is not None and len(predicted_depth) != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the predicted depth')
if do_remove_padding is None:
do_remove_padding = self.do_pad
if source_sizes is None and do_remove_padding:
raise ValueError('Either `source_sizes` should be passed in, or `do_remove_padding` should be set to False')
if source_sizes is not None and len(predicted_depth) != len(source_sizes):
raise ValueError('Make sure that you pass in as many source image sizes as the batch dimension of the logits')
if outputs_flipped is not None:
predicted_depth = (predicted_depth + torch.flip(outputs_flipped.predicted_depth, dims=[-1])) / 2
predicted_depth = predicted_depth.unsqueeze(1)
padding_factor_h = padding_factor_w = 3
results = []
target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
source_sizes = [None] * len(predicted_depth) if source_sizes is None else source_sizes
for depth, target_size, source_size in zip(predicted_depth, target_sizes, source_sizes):
if source_size is not None:
pad_h = pad_w = 0
if do_remove_padding:
pad_h = int(np.sqrt(source_size[0] / 2) * padding_factor_h)
pad_w = int(np.sqrt(source_size[1] / 2) * padding_factor_w)
depth = nn.functional.interpolate(depth.unsqueeze(1), size=[source_size[0] + 2 * pad_h, source_size[1] + 2 * pad_w], mode='bicubic', align_corners=False)
if pad_h > 0:
depth = depth[:, :, pad_h:-pad_h, :]
if pad_w > 0:
depth = depth[:, :, :, pad_w:-pad_w]
depth = depth.squeeze(1)
if target_size is not None:
target_size = [target_size[0], target_size[1]]
depth = nn.functional.interpolate(depth.unsqueeze(1), size=target_size, mode='bicubic', align_corners=False)
depth = depth.squeeze()
results.append({'predicted_depth': depth})
return results
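A hedged end-to-end sketch tying `preprocess` and `post_process_depth_estimation` together; the checkpoint name comes from the ZoeDepth config docstring above, and the image path is illustrative.
import torch
from PIL import Image
from transformers import ZoeDepthForDepthEstimation, ZoeDepthImageProcessor
processor = ZoeDepthImageProcessor.from_pretrained('Intel/zoedepth-nyu')
model = ZoeDepthForDepthEstimation.from_pretrained('Intel/zoedepth-nyu')
image = Image.open('example.jpg')  # any RGB image
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)
# source_sizes lets the post-processing strip the reflection padding added in preprocess
results = processor.post_process_depth_estimation(outputs, source_sizes=[(image.height, image.width)], target_sizes=[(image.height, image.width)])
depth = results[0]['predicted_depth']  # (height, width) metric depth tensor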
|
class ZoeDepthImageProcessor(BaseImageProcessor):
'''
Constructs a ZoeDepth image processor.
Args:
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the input.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
`preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in `preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions. Can be overridden by `do_resize` in `preprocess`.
size (`dict[str, int]`, *optional*, defaults to `{"height": 384, "width": 512}`):
Size of the image after resizing. If `keep_aspect_ratio` is `True`,
the image is resized by choosing the smaller of the height and width scaling factors and using it for both dimensions.
If `ensure_multiple_of` is also set, the image is further resized to a size that is a multiple of this value.
Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Defines the resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
keep_aspect_ratio (`bool`, *optional*, defaults to `True`):
If `True`, the image is resized by choosing the smaller of the height and width scaling factors and using it
for both dimensions. This ensures that the image is scaled down as little as possible while still fitting
within the desired output size. In case `ensure_multiple_of` is also set, the image is further resized to a
size that is a multiple of this value by flooring the height and width to the nearest multiple of this value.
Can be overridden by `keep_aspect_ratio` in `preprocess`.
ensure_multiple_of (`int`, *optional*, defaults to 32):
If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by flooring
the height and width to the nearest multiple of this value.
Works both with and without `keep_aspect_ratio` being set to `True`. Can be overridden by `ensure_multiple_of`
in `preprocess`.
'''
def __init__(self, do_pad: bool=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, keep_aspect_ratio: bool=True, ensure_multiple_of: int=32, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], keep_aspect_ratio: bool=False, ensure_multiple_of: int=1, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image
is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is
set, the image is resized to a size that is a multiple of this value.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Target size of the output image.
keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.
ensure_multiple_of (`int`, *optional*, defaults to 1):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Defines the resampling filter to use if resizing the image. Otherwise, the image is resized to size
specified in `size`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def pad_image(self, image: np.ndarray, mode: PaddingMode=PaddingMode.REFLECT, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Pad an image as done in the original ZoeDepth implementation.
Padding fixes the boundary artifacts in the output depth map.
Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset
which has a black or white border around the image. This function pads the input image and crops
the prediction back to the original size / view.
Args:
image (`np.ndarray`):
Image to pad.
mode (`PaddingMode`):
The padding mode to use. Can be one of:
- `"constant"`: pads with a constant value.
- `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
vector along each axis.
- `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
- `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_pad: Optional[bool]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_resize: Optional[bool]=None, size: Optional[int]=None, keep_aspect_ratio: Optional[bool]=None, ensure_multiple_of: Optional[int]=None, resample: Optional[PILImageResampling]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the input image.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. If `keep_aspect_ratio` is `True`, the image is resized by choosing the
smaller of the height and width scaling factors and using it for both dimensions. If `ensure_multiple_of`
is also set, the image is further resized to a size that is a multiple of this value.
keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`):
If `True` and `do_resize=True`, the image is resized by choosing the smaller of the height and width
scaling factors and using it for both dimensions. This ensures that the image is scaled down as little
as possible while still fitting within the desired output size. In case `ensure_multiple_of` is also
set, the image is further resized to a size that is a multiple of this value by flooring the height and
width to the nearest multiple of this value.
ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`):
If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by
flooring the height and width to the nearest multiple of this value.
Works both with and without `keep_aspect_ratio` being set to `True`. Can be overridden by
`ensure_multiple_of` in `preprocess`.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def post_process_depth_estimation(self, outputs: 'ZoeDepthDepthEstimatorOutput', source_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]]=None, target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]]=None, outputs_flipped: Optional[Union['ZoeDepthDepthEstimatorOutput', None]]=None, do_remove_padding: Optional[Union[bool, None]]=None) -> list[dict[str, TensorType]]:
'''
Converts the raw output of [`ZoeDepthDepthEstimatorOutput`] into final depth predictions and depth PIL images.
Only supports PyTorch.
Args:
outputs ([`ZoeDepthDepthEstimatorOutput`]):
Raw outputs of the model.
source_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the source size
(height, width) of each image in the batch before preprocessing. This argument should be treated as
required unless the user passes `do_remove_padding=False` to this function.
target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
outputs_flipped ([`ZoeDepthDepthEstimatorOutput`], *optional*):
Raw outputs of the model from flipped input (averaged out in the end).
do_remove_padding (`bool`, *optional*):
By default ZoeDepth adds padding equal to `int(√(height / 2) * 3)` (and similarly for width) to fix the
boundary artifacts in the output depth map, so this padding needs to be removed during post-processing. The
parameter exists here in case the user changed the image preprocessing to not include padding.
Returns:
`list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
predictions.
'''
pass
| 7 | 5 | 81 | 8 | 45 | 28 | 9 | 0.79 | 1 | 10 | 3 | 0 | 5 | 11 | 5 | 25 | 457 | 50 | 227 | 87 | 167 | 180 | 111 | 33 | 105 | 19 | 3 | 3 | 44 |
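The padding scheme described in `pad_image` and `post_process_depth_estimation` above can be made concrete with a short sketch. This is a minimal illustration, not the processor's actual code: the helper name `boundary_pad`, the symmetric reflect padding via `np.pad`, and the crop step are assumptions; only the `int(sqrt(side / 2) * 3)` formula comes from the docstring.
```python
# Hypothetical sketch of the pad-then-crop round trip described above.
import math
import numpy as np

def boundary_pad(image: np.ndarray) -> tuple[np.ndarray, tuple[int, int]]:
    height, width = image.shape[:2]
    pad_h = int(math.sqrt(height / 2) * 3)  # formula quoted in the docstring
    pad_w = int(math.sqrt(width / 2) * 3)
    padded = np.pad(image, ((pad_h, pad_h), (pad_w, pad_w), (0, 0)), mode="reflect")
    return padded, (pad_h, pad_w)

image = np.random.rand(480, 640, 3)
padded, (pad_h, pad_w) = boundary_pad(image)
# After inference, the prediction is cropped back to the source view:
depth = padded[..., 0]  # stand-in for a model output on the padded input
cropped = depth[pad_h : depth.shape[0] - pad_h, pad_w : depth.shape[1] - pad_w]
assert cropped.shape == image.shape[:2]
```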
6,373 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.LogBinomialSoftmax |
from torch import nn
import torch
class LogBinomialSoftmax(nn.Module):
def __init__(self, n_classes=256, act=torch.softmax):
"""Compute log binomial distribution for n_classes
Args:
n_classes (`int`, *optional*, defaults to 256):
Number of output classes.
act (`torch.nn.Module`, *optional*, defaults to `torch.softmax`):
Activation function to apply to the output.
"""
super().__init__()
self.k = n_classes
self.act = act
self.register_buffer('k_idx', torch.arange(0, n_classes).view(1, -1, 1, 1), persistent=False)
self.register_buffer('k_minus_1', torch.tensor([self.k - 1]).view(1, -1, 1, 1), persistent=False)
def forward(self, probabilities, temperature=1.0, eps=0.0001):
"""Compute the log binomial distribution for probabilities.
Args:
probabilities (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Tensor containing probabilities of each class.
temperature (`float` or `torch.Tensor` of shape `(batch_size, num_channels, height, width)`, *optional*, defaults to 1):
Temperature of distribution.
eps (`float`, *optional*, defaults to 1e-4):
Small number for numerical stability.
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, height, width)`:
Log binomial distribution logbinomial(p;t).
"""
if probabilities.ndim == 3:
probabilities = probabilities.unsqueeze(1)
one_minus_probabilities = torch.clamp(1 - probabilities, eps, 1)
probabilities = torch.clamp(probabilities, eps, 1)
y = log_binom(self.k_minus_1, self.k_idx) + self.k_idx * torch.log(probabilities) + (self.k_minus_1 - self.k_idx) * torch.log(one_minus_probabilities)
return self.act(y / temperature, dim=1)
|
class LogBinomialSoftmax(nn.Module):
def __init__(self, n_classes=256, act=torch.softmax):
'''Compute log binomial distribution for n_classes
Args:
n_classes (`int`, *optional*, defaults to 256):
Number of output classes.
act (`torch.nn.Module`, *optional*, defaults to `torch.softmax`):
Activation function to apply to the output.
'''
pass
def forward(self, probabilities, temperature=1.0, eps=0.0001):
'''Compute the log binomial distribution for probabilities.
Args:
probabilities (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Tensor containing probabilities of each class.
temperature (`float` or `torch.Tensor` of shape `(batch_size, num_channels, height, width)`, *optional*, defaults to 1):
Temperature of distribution.
eps (`float`, *optional*, defaults to 1e-4):
Small number for numerical stability.
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, height, width)`:
Log binomial distribution logbinomial(p;t).
'''
pass
| 3 | 2 | 20 | 2 | 9 | 10 | 2 | 1.11 | 1 | 1 | 0 | 0 | 2 | 2 | 2 | 12 | 42 | 5 | 18 | 7 | 15 | 20 | 14 | 7 | 11 | 2 | 1 | 1 | 3 |
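The `forward` above leans on a `log_binom` helper that is not part of this record. The sketch below rebuilds it exactly with `torch.lgamma` (the library may approximate the coefficient differently) to show how the per-pixel log-pmf over bins is assembled:
```python
# Self-contained sketch of the log-binomial head; log_binom is an assumption.
import torch

def log_binom(n: torch.Tensor, k: torch.Tensor) -> torch.Tensor:
    # log C(n, k) = lgamma(n + 1) - lgamma(k + 1) - lgamma(n - k + 1)
    return torch.lgamma(n + 1) - torch.lgamma(k + 1) - torch.lgamma(n - k + 1)

k = 8  # small number of bins for readability
k_idx = torch.arange(k).view(1, -1, 1, 1).float()
k_minus_1 = torch.tensor([k - 1.0]).view(1, -1, 1, 1)
p = torch.full((1, 1, 2, 2), 0.3)  # per-pixel "success" probability

log_pmf = log_binom(k_minus_1, k_idx) + k_idx * torch.log(p) + (k_minus_1 - k_idx) * torch.log(1 - p)
pmf = torch.softmax(log_pmf, dim=1)  # temperature = 1
print(pmf[0, :, 0, 0])  # peaks near k_idx ≈ p * (k - 1) = 2.1
assert torch.allclose(pmf.sum(dim=1), torch.ones(1, 2, 2))
```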
6,374 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthAttractorLayer |
from torch import nn
import torch
class ZoeDepthAttractorLayer(nn.Module):
def __init__(self, config, n_bins, n_attractors=16, min_depth=0.001, max_depth=10, memory_efficient=False):
"""
Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)
"""
super().__init__()
self.alpha = config.attractor_alpha
self.gamma = config.attractor_gamma
self.kind = config.attractor_kind
self.n_attractors = n_attractors
self.n_bins = n_bins
self.min_depth = min_depth
self.max_depth = max_depth
self.memory_efficient = memory_efficient
in_features = mlp_dim = config.bin_embedding_dim
self.conv1 = nn.Conv2d(in_features, mlp_dim, 1, 1, 0)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(mlp_dim, n_attractors * 2, 1, 1, 0)
self.act2 = nn.ReLU(inplace=True)
def forward(self, x, prev_bin, prev_bin_embedding=None, interpolate=True):
"""
The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers
and the attractor points (the latter are predicted by the MLP).
Args:
x (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Feature block.
prev_bin (`torch.Tensor` of shape `(batch_size, prev_number_of_bins, height, width)`):
Previous bin centers normed.
prev_bin_embedding (`torch.Tensor`, *optional*):
Optional previous bin embeddings.
interpolate (`bool`, *optional*, defaults to `True`):
Whether to interpolate the previous bin embeddings to the size of the input features.
Returns:
`tuple[torch.Tensor, torch.Tensor]`:
New bin centers normed and scaled.
"""
if prev_bin_embedding is not None:
if interpolate:
prev_bin_embedding = nn.functional.interpolate(prev_bin_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
x = x + prev_bin_embedding
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
attractors = self.act2(x)
attractors = attractors + 0.001
batch_size, _, height, width = attractors.shape
attractors = attractors.view(batch_size, self.n_attractors, 2, height, width)
attractors_normed = attractors[:, :, 0, ...]
bin_centers = nn.functional.interpolate(prev_bin, (height, width), mode='bilinear', align_corners=True)
if not self.memory_efficient:
func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]
delta_c = func(inv_attractor(attractors_normed.unsqueeze(2) - bin_centers.unsqueeze(1)), dim=1)
else:
delta_c = torch.zeros_like(bin_centers, device=bin_centers.device)
for i in range(self.n_attractors):
delta_c += inv_attractor(attractors_normed[:, i, ...].unsqueeze(1) - bin_centers)
if self.kind == 'mean':
delta_c = delta_c / self.n_attractors
bin_new_centers = bin_centers + delta_c
bin_centers = (self.max_depth - self.min_depth) * bin_new_centers + self.min_depth
bin_centers, _ = torch.sort(bin_centers, dim=1)
bin_centers = torch.clip(bin_centers, self.min_depth, self.max_depth)
return (bin_new_centers, bin_centers)
|
class ZoeDepthAttractorLayer(nn.Module):
def __init__(self, config, n_bins, n_attractors=16, min_depth=0.001, max_depth=10, memory_efficient=False):
'''
Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)
'''
pass
def forward(self, x, prev_bin, prev_bin_embedding=None, interpolate=True):
'''
The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers
and the attractor points (the latter are predicted by the MLP).
Args:
x (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Feature block.
prev_bin (`torch.Tensor` of shape `(batch_size, prev_number_of_bins, height, width)`):
Previous bin centers normed.
prev_bin_embedding (`torch.Tensor`, *optional*):
Optional previous bin embeddings.
interpolate (`bool`, *optional*, defaults to `True`):
Whether to interpolate the previous bin embeddings to the size of the input features.
Returns:
`tuple[torch.Tensor, torch.Tensor]`:
New bin centers normed and scaled.
'''
pass
| 3 | 2 | 46 | 6 | 27 | 14 | 4 | 0.52 | 1 | 2 | 0 | 0 | 2 | 12 | 2 | 12 | 93 | 13 | 54 | 32 | 43 | 28 | 43 | 24 | 40 | 6 | 1 | 2 | 7 |
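The attractor update in `forward` above depends on an `inv_attractor` function defined elsewhere in the file. The sketch below follows the inverse attractor of the ZoeDepth paper, dc = dx / (1 + alpha * dx^gamma); the default constants are assumptions (in the real module they come from the config):
```python
# Hedged reconstruction of the inverse attractor used by the layer above.
import torch

def inv_attractor(dx: torch.Tensor, alpha: float = 300, gamma: int = 2) -> torch.Tensor:
    # dx = attractor_position - bin_center; the result is the shift applied to
    # the bin center: large when the attractor is close, vanishing when far.
    return dx / (1 + alpha * dx.pow(gamma))

bin_centers = torch.tensor([0.10, 0.40, 0.80])  # normalized bin centers
attractor = torch.tensor(0.42)                  # a single attractor point
delta = inv_attractor(attractor - bin_centers)
print(delta)  # the 0.40 bin moves most; 0.10 and 0.80 barely move
new_centers = bin_centers + delta
```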
6,375 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthAttractorLayerUnnormed |
from torch import nn
import torch
class ZoeDepthAttractorLayerUnnormed(nn.Module):
def __init__(self, config, n_bins, n_attractors=16, min_depth=0.001, max_depth=10, memory_efficient=True):
"""
Attractor layer for bin centers. Bin centers are unbounded
"""
super().__init__()
self.n_attractors = n_attractors
self.n_bins = n_bins
self.min_depth = min_depth
self.max_depth = max_depth
self.alpha = config.attractor_alpha
self.gamma = config.attractor_gamma
self.kind = config.attractor_kind
self.memory_efficient = memory_efficient
in_features = mlp_dim = config.bin_embedding_dim
self.conv1 = nn.Conv2d(in_features, mlp_dim, 1, 1, 0)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0)
self.act2 = nn.Softplus()
def forward(self, x, prev_bin, prev_bin_embedding=None, interpolate=True):
"""
The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers
and the attractor points (the latter are predicted by the MLP).
Args:
x (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Feature block.
prev_bin (`torch.Tensor` of shape `(batch_size, prev_num_bins, height, width)`):
Previous bin centers normed.
prev_bin_embedding (`torch.Tensor`, *optional*):
Optional previous bin embeddings.
interpolate (`bool`, *optional*, defaults to `True`):
Whether to interpolate the previous bin embeddings to the size of the input features.
Returns:
`tuple[torch.Tensor, torch.Tensor]`:
New bin centers unbounded. Two outputs just to keep the API consistent with the normed version.
"""
if prev_bin_embedding is not None:
if interpolate:
prev_bin_embedding = nn.functional.interpolate(prev_bin_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
x = x + prev_bin_embedding
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
attractors = self.act2(x)
height, width = attractors.shape[-2:]
bin_centers = nn.functional.interpolate(prev_bin, (height, width), mode='bilinear', align_corners=True)
if not self.memory_efficient:
func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]
delta_c = func(inv_attractor(attractors.unsqueeze(2) - bin_centers.unsqueeze(1)), dim=1)
else:
delta_c = torch.zeros_like(bin_centers, device=bin_centers.device)
for i in range(self.n_attractors):
delta_c += inv_attractor(attractors[:, i, ...].unsqueeze(1) - bin_centers)
if self.kind == 'mean':
delta_c = delta_c / self.n_attractors
bin_new_centers = bin_centers + delta_c
bin_centers = bin_new_centers
return (bin_new_centers, bin_centers)
|
class ZoeDepthAttractorLayerUnnormed(nn.Module):
def __init__(self, config, n_bins, n_attractors=16, min_depth=0.001, max_depth=10, memory_efficient=True):
'''
Attractor layer for bin centers. Bin centers are unbounded
'''
pass
def forward(self, x, prev_bin, prev_bin_embedding=None, interpolate=True):
'''
The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers
and the attractor points (the latter are predicted by the MLP).
Args:
x (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Feature block.
prev_bin (`torch.Tensor` of shape `(batch_size, prev_num_bins, height, width)`):
Previous bin centers normed.
prev_bin_embedding (`torch.Tensor`, *optional*):
Optional previous bin embeddings.
interpolate (`bool`, *optional*, defaults to `True`):
Whether to interpolate the previous bin embeddings to the size of the input features.
Returns:
`tuple[torch.Tensor, torch.Tensor]`:
New bin centers unbounded. Two outputs just to keep the API consistent with the normed version.
'''
pass
| 3 | 2 | 40 | 6 | 24 | 11 | 4 | 0.43 | 1 | 2 | 0 | 0 | 2 | 12 | 2 | 12 | 82 | 12 | 49 | 31 | 38 | 21 | 38 | 23 | 35 | 6 | 1 | 2 | 7 |
6,376 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthConditionalLogBinomialSoftmax |
import torch
from torch import nn
class ZoeDepthConditionalLogBinomialSoftmax(nn.Module):
def __init__(self, config, in_features, condition_dim, n_classes=256, bottleneck_factor=2):
"""Per-pixel MLP followed by a Conditional Log Binomial softmax.
Args:
in_features (`int`):
Number of input channels in the main feature.
condition_dim (`int`):
Number of input channels in the condition feature.
n_classes (`int`, *optional*, defaults to 256):
Number of classes.
bottleneck_factor (`int`, *optional*, defaults to 2):
Hidden dim factor.
"""
super().__init__()
bottleneck = (in_features + condition_dim) // bottleneck_factor
self.mlp = nn.Sequential(nn.Conv2d(in_features + condition_dim, bottleneck, kernel_size=1, stride=1, padding=0), nn.GELU(), nn.Conv2d(bottleneck, 2 + 2, kernel_size=1, stride=1, padding=0), nn.Softplus())
self.p_eps = 0.0001
self.max_temp = config.max_temp
self.min_temp = config.min_temp
self.log_binomial_transform = LogBinomialSoftmax(n_classes, act=torch.softmax)
def forward(self, main_feature, condition_feature):
"""
Args:
main_feature (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Main feature.
condition_feature (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Condition feature.
Returns:
`torch.Tensor`:
Output log binomial distribution
"""
probabilities_and_temperature = self.mlp(torch.concat((main_feature, condition_feature), dim=1))
probabilities, temperature = (probabilities_and_temperature[:, :2, ...], probabilities_and_temperature[:, 2:, ...])
probabilities = probabilities + self.p_eps
probabilities = probabilities[:, 0, ...] / (probabilities[:, 0, ...] + probabilities[:, 1, ...])
temperature = temperature + self.p_eps
temperature = temperature[:, 0, ...] / (temperature[:, 0, ...] + temperature[:, 1, ...])
temperature = temperature.unsqueeze(1)
temperature = (self.max_temp - self.min_temp) * temperature + self.min_temp
return self.log_binomial_transform(probabilities, temperature)
|
class ZoeDepthConditionalLogBinomialSoftmax(nn.Module):
def __init__(self, config, in_features, condition_dim, n_classes=256, bottleneck_factor=2):
'''Per-pixel MLP followed by a Conditional Log Binomial softmax.
Args:
in_features (`int`):
Number of input channels in the main feature.
condition_dim (`int`):
Number of input channels in the condition feature.
n_classes (`int`, *optional*, defaults to 256):
Number of classes.
bottleneck_factor (`int`, *optional*, defaults to 2):
Hidden dim factor.
'''
pass
def forward(self, main_feature, condition_feature):
'''
Args:
main_feature (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Main feature.
condition_feature (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Condition feature.
Returns:
`torch.Tensor`:
Output log binomial distribution
'''
pass
| 3 | 2 | 32 | 4 | 17 | 11 | 1 | 0.65 | 1 | 2 | 1 | 0 | 2 | 5 | 2 | 12 | 65 | 9 | 34 | 18 | 24 | 22 | 19 | 11 | 16 | 1 | 1 | 0 | 2 |
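The channel bookkeeping in the `forward` above is easy to miss: the MLP emits four channels per pixel, and each pair is collapsed to a ratio in (0, 1). A shape-only sketch with random stand-in tensors (the `max_temp`/`min_temp` values are placeholders for the config's):
```python
import torch

batch, height, width = 2, 8, 8
mlp_out = torch.rand(batch, 4, height, width) + 1e-4  # Softplus keeps this positive

prob_pair, temp_pair = mlp_out[:, :2], mlp_out[:, 2:]
probabilities = prob_pair[:, 0] / (prob_pair[:, 0] + prob_pair[:, 1])  # (batch, h, w)
temperature = temp_pair[:, 0] / (temp_pair[:, 0] + temp_pair[:, 1])

max_temp, min_temp = 50.0, 0.1  # placeholder config values
temperature = (max_temp - min_temp) * temperature.unsqueeze(1) + min_temp
assert probabilities.min() > 0 and probabilities.max() < 1
assert temperature.shape == (batch, 1, height, width)
```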
6,377 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthDepthEstimatorOutput |
import torch
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, logging
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Extension of `DepthEstimatorOutput` to include domain logits (ZoeDepth specific).\n ')
class ZoeDepthDepthEstimatorOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
domain_logits (`torch.FloatTensor` of shape `(batch_size, num_domains)`):
Logits for each domain (e.g. NYU and KITTI) in case multiple metric heads are used.
"""
loss: Optional[torch.FloatTensor] = None
predicted_depth: Optional[torch.FloatTensor] = None
domain_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Extension of `DepthEstimatorOutput` to include domain logits (ZoeDepth specific).\n ')
class ZoeDepthDepthEstimatorOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
domain_logits (`torch.FloatTensor` of shape `(batch_size, num_domains)`):
Logits for each domain (e.g. NYU and KITTI) in case multiple metric heads are used.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 6 | 6 | 6 | 5 | 19 | 6 | 6 | 5 | 0 | 1 | 0 | 0 |
6,378 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthFeatureFusionLayer |
from typing import Optional, Union
from torch import nn
from .configuration_zoedepth import ZoeDepthConfig
import torch
class ZoeDepthFeatureFusionLayer(nn.Module):
"""Feature fusion layer, merges feature maps from different stages.
Args:
config (`[ZoeDepthConfig]`):
Model configuration class defining the model architecture.
align_corners (`bool`, *optional*, defaults to `True`):
The align_corner setting for bilinear upsample.
"""
def __init__(self, config: ZoeDepthConfig, align_corners: bool=True):
super().__init__()
self.align_corners = align_corners
self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)
self.residual_layer1 = ZoeDepthPreActResidualLayer(config)
self.residual_layer2 = ZoeDepthPreActResidualLayer(config)
def forward(self, hidden_state: torch.Tensor, residual: Optional[torch.Tensor]=None) -> torch.Tensor:
if residual is not None:
if hidden_state.shape != residual.shape:
residual = nn.functional.interpolate(residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode='bilinear', align_corners=False)
hidden_state = hidden_state + self.residual_layer1(residual)
hidden_state = self.residual_layer2(hidden_state)
hidden_state = nn.functional.interpolate(hidden_state, scale_factor=2, mode='bilinear', align_corners=self.align_corners)
hidden_state = self.projection(hidden_state)
return hidden_state
|
class ZoeDepthFeatureFusionLayer(nn.Module):
'''Feature fusion layer, merges feature maps from different stages.
Args:
config (`[ZoeDepthConfig]`):
Model configuration class defining the model architecture.
align_corners (`bool`, *optional*, defaults to `True`):
The align_corner setting for bilinear upsample.
'''
def __init__(self, config: ZoeDepthConfig, align_corners: bool=True):
pass
def forward(self, hidden_state: torch.Tensor, residual: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 3 | 1 | 12 | 3 | 10 | 0 | 2 | 0.35 | 1 | 2 | 1 | 0 | 2 | 4 | 2 | 12 | 35 | 8 | 20 | 7 | 17 | 7 | 16 | 7 | 13 | 3 | 1 | 2 | 4 |
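A shape-level sketch of the fusion step above: a residual from another stage is resized to match, added, and the fused map is upsampled by 2x and projected. Plain tensors stand in for real features and the two pre-activation residual units are elided:
```python
import torch
from torch import nn

fusion_hidden_size = 64
hidden_state = torch.rand(1, fusion_hidden_size, 12, 16)  # current stage
residual = torch.rand(1, fusion_hidden_size, 24, 32)      # earlier, finer stage

residual = nn.functional.interpolate(residual, size=hidden_state.shape[2:], mode="bilinear", align_corners=False)
fused = hidden_state + residual
fused = nn.functional.interpolate(fused, scale_factor=2, mode="bilinear", align_corners=True)
projection = nn.Conv2d(fusion_hidden_size, fusion_hidden_size, kernel_size=1)
out = projection(fused)
assert out.shape == (1, fusion_hidden_size, 24, 32)
```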
6,379 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthFeatureFusionStage |
from .configuration_zoedepth import ZoeDepthConfig
from torch import nn
class ZoeDepthFeatureFusionStage(nn.Module):
def __init__(self, config: ZoeDepthConfig):
super().__init__()
self.layers = nn.ModuleList()
for _ in range(len(config.neck_hidden_sizes)):
self.layers.append(ZoeDepthFeatureFusionLayer(config))
def forward(self, hidden_states):
hidden_states = hidden_states[::-1]
fused_hidden_states = []
fused_hidden_state = None
for hidden_state, layer in zip(hidden_states, self.layers):
if fused_hidden_state is None:
fused_hidden_state = layer(hidden_state)
else:
fused_hidden_state = layer(fused_hidden_state, hidden_state)
fused_hidden_states.append(fused_hidden_state)
return fused_hidden_states
|
class ZoeDepthFeatureFusionStage(nn.Module):
def __init__(self, config: ZoeDepthConfig):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 10 | 1 | 8 | 1 | 3 | 0.12 | 1 | 4 | 1 | 0 | 2 | 1 | 2 | 12 | 22 | 3 | 17 | 8 | 14 | 2 | 16 | 8 | 13 | 3 | 1 | 2 | 5 |
6,380 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthForDepthEstimation |
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_outputs import DepthEstimatorOutput
import torch
from typing import Optional, Union
from ...utils.backbone_utils import load_backbone
@auto_docstring(custom_intro='\n ZoeDepth model with one or multiple metric depth estimation head(s) on top.\n ')
class ZoeDepthForDepthEstimation(ZoeDepthPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.backbone = load_backbone(config)
if hasattr(self.backbone.config, 'hidden_size') and hasattr(self.backbone.config, 'patch_size'):
config.backbone_hidden_size = self.backbone.config.hidden_size
self.patch_size = self.backbone.config.patch_size
else:
raise ValueError("ZoeDepth assumes the backbone's config to have `hidden_size` and `patch_size` attributes")
self.neck = ZoeDepthNeck(config)
self.relative_head = ZoeDepthRelativeDepthEstimationHead(config)
self.metric_head = ZoeDepthMultipleMetricDepthEstimationHeads(config) if len(config.bin_configurations) > 1 else ZoeDepthMetricDepthEstimationHead(config)
self.post_init()
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], DepthEstimatorOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth depth estimation maps for computing the loss.
Examples:
```python
>>> from transformers import AutoImageProcessor, ZoeDepthForDepthEstimation
>>> import torch
>>> import numpy as np
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti")
>>> model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # interpolate to original size
>>> post_processed_output = image_processor.post_process_depth_estimation(
... outputs,
... source_sizes=[(image.height, image.width)],
... )
>>> # visualize the prediction
>>> predicted_depth = post_processed_output[0]["predicted_depth"]
>>> depth = predicted_depth * 255 / predicted_depth.max()
>>> depth = depth.detach().cpu().numpy()
>>> depth = Image.fromarray(depth.astype("uint8"))
```"""
loss = None
if labels is not None:
raise NotImplementedError('Training is not implemented yet')
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
outputs = self.backbone.forward_with_filtered_kwargs(pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions)
hidden_states = outputs.feature_maps
_, _, height, width = pixel_values.shape
patch_size = self.patch_size
patch_height = height // patch_size
patch_width = width // patch_size
hidden_states, features = self.neck(hidden_states, patch_height, patch_width)
out = [features] + hidden_states
relative_depth, features = self.relative_head(hidden_states)
out = [features] + out
metric_depth, domain_logits = self.metric_head(outconv_activation=out[0], bottleneck=out[1], feature_blocks=out[2:], relative_depth=relative_depth)
metric_depth = metric_depth.squeeze(dim=1)
if not return_dict:
if domain_logits is not None:
output = (metric_depth, domain_logits) + outputs[1:]
else:
output = (metric_depth,) + outputs[1:]
return (loss,) + output if loss is not None else output
return ZoeDepthDepthEstimatorOutput(loss=loss, predicted_depth=metric_depth, domain_logits=domain_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n ZoeDepth model with one or multiple metric depth estimation head(s) on top.\n ')
class ZoeDepthForDepthEstimation(ZoeDepthPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], DepthEstimatorOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth depth estimation maps for computing the loss.
Examples:
```python
>>> from transformers import AutoImageProcessor, ZoeDepthForDepthEstimation
>>> import torch
>>> import numpy as np
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti")
>>> model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # interpolate to original size
>>> post_processed_output = image_processor.post_process_depth_estimation(
... outputs,
... source_sizes=[(image.height, image.width)],
... )
>>> # visualize the prediction
>>> predicted_depth = post_processed_output[0]["predicted_depth"]
>>> depth = predicted_depth * 255 / predicted_depth.max()
>>> depth = depth.detach().cpu().numpy()
>>> depth = Image.fromarray(depth.astype("uint8"))
```'''
pass
| 5 | 1 | 59 | 12 | 32 | 16 | 6 | 0.47 | 1 | 11 | 6 | 0 | 2 | 5 | 2 | 3 | 122 | 25 | 66 | 28 | 54 | 31 | 37 | 20 | 34 | 8 | 2 | 2 | 11 |
6,381 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthMLPClassifier |
from torch import nn
class ZoeDepthMLPClassifier(nn.Module):
def __init__(self, in_features, out_features) -> None:
super().__init__()
hidden_features = in_features
self.linear1 = nn.Linear(in_features, hidden_features)
self.activation = nn.ReLU()
self.linear2 = nn.Linear(hidden_features, out_features)
def forward(self, hidden_state):
hidden_state = self.linear1(hidden_state)
hidden_state = self.activation(hidden_state)
domain_logits = self.linear2(hidden_state)
return domain_logits
|
class ZoeDepthMLPClassifier(nn.Module):
def __init__(self, in_features, out_features) -> None:
pass
def forward(self, hidden_state):
pass
| 3 | 0 | 7 | 1 | 6 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 3 | 2 | 12 | 15 | 3 | 12 | 8 | 9 | 0 | 12 | 8 | 9 | 1 | 1 | 0 | 2 |
6,382 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthMetricDepthEstimationHead |
import torch
from torch import nn
class ZoeDepthMetricDepthEstimationHead(nn.Module):
def __init__(self, config):
super().__init__()
bin_configuration = config.bin_configurations[0]
n_bins = bin_configuration['n_bins']
min_depth = bin_configuration['min_depth']
max_depth = bin_configuration['max_depth']
bin_embedding_dim = config.bin_embedding_dim
n_attractors = config.num_attractors
bin_centers_type = config.bin_centers_type
self.min_depth = min_depth
self.max_depth = max_depth
self.bin_centers_type = bin_centers_type
bottleneck_features = config.bottleneck_features
self.conv2 = nn.Conv2d(bottleneck_features, bottleneck_features, kernel_size=1, stride=1, padding=0)
if self.bin_centers_type == 'normed':
Attractor = ZoeDepthAttractorLayer
elif self.bin_centers_type == 'softplus':
Attractor = ZoeDepthAttractorLayerUnnormed
self.seed_bin_regressor = ZoeDepthSeedBinRegressor(config, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth)
self.seed_projector = ZoeDepthProjector(in_features=bottleneck_features, out_features=bin_embedding_dim)
self.projectors = nn.ModuleList([ZoeDepthProjector(in_features=config.fusion_hidden_size, out_features=bin_embedding_dim) for _ in range(4)])
self.attractors = nn.ModuleList([Attractor(config, n_bins=n_bins, n_attractors=n_attractors[i], min_depth=min_depth, max_depth=max_depth) for i in range(4)])
last_in = config.num_relative_features + 1
self.conditional_log_binomial = ZoeDepthConditionalLogBinomialSoftmax(config, last_in, bin_embedding_dim, n_classes=n_bins)
def forward(self, outconv_activation, bottleneck, feature_blocks, relative_depth):
x = self.conv2(bottleneck)
_, seed_bin_centers = self.seed_bin_regressor(x)
if self.bin_centers_type in ['normed', 'hybrid2']:
prev_bin = (seed_bin_centers - self.min_depth) / (self.max_depth - self.min_depth)
else:
prev_bin = seed_bin_centers
prev_bin_embedding = self.seed_projector(x)
for projector, attractor, feature in zip(self.projectors, self.attractors, feature_blocks):
bin_embedding = projector(feature)
bin, bin_centers = attractor(bin_embedding, prev_bin, prev_bin_embedding, interpolate=True)
prev_bin = bin.clone()
prev_bin_embedding = bin_embedding.clone()
last = outconv_activation
relative_conditioning = relative_depth.unsqueeze(1)
relative_conditioning = nn.functional.interpolate(relative_conditioning, size=last.shape[2:], mode='bilinear', align_corners=True)
last = torch.cat([last, relative_conditioning], dim=1)
bin_embedding = nn.functional.interpolate(bin_embedding, last.shape[-2:], mode='bilinear', align_corners=True)
x = self.conditional_log_binomial(last, bin_embedding)
bin_centers = nn.functional.interpolate(bin_centers, x.shape[-2:], mode='bilinear', align_corners=True)
out = torch.sum(x * bin_centers, dim=1, keepdim=True)
return (out, None)
|
class ZoeDepthMetricDepthEstimationHead(nn.Module):
def __init__(self, config):
pass
def forward(self, outconv_activation, bottleneck, feature_blocks, relative_depth):
pass
| 3 | 0 | 47 | 8 | 36 | 4 | 3 | 0.1 | 1 | 8 | 5 | 0 | 2 | 9 | 2 | 12 | 95 | 17 | 72 | 33 | 69 | 7 | 45 | 32 | 42 | 3 | 1 | 1 | 6 |
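The last two lines of the `forward` above convert the per-pixel bin distribution into a single depth value by taking the expectation of the bin centers; a tiny numeric sketch with made-up bins:
```python
import torch

probs = torch.tensor([0.1, 0.7, 0.2]).view(1, 3, 1, 1)        # per-pixel bin weights
bin_centers = torch.tensor([1.0, 2.0, 4.0]).view(1, 3, 1, 1)  # metric depth per bin
depth = torch.sum(probs * bin_centers, dim=1, keepdim=True)
print(depth.squeeze())  # 0.1 * 1 + 0.7 * 2 + 0.2 * 4 = 2.3 meters
```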
6,383 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthMultiheadAttention |
from torch import nn
import math
import torch
from typing import Optional, Union
class ZoeDepthMultiheadAttention(nn.Module):
"""Equivalent implementation of nn.MultiheadAttention with `batch_first=True`."""
def __init__(self, hidden_size, num_attention_heads, dropout):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(f'The hidden size ({hidden_size}) is not a multiple of the number of attention heads ({num_attention_heads})')
self.num_attention_heads = num_attention_heads
self.attention_head_size = int(hidden_size / num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(hidden_size, self.all_head_size)
self.key = nn.Linear(hidden_size, self.all_head_size)
self.value = nn.Linear(hidden_size, self.all_head_size)
self.out_proj = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, queries: torch.Tensor, keys: torch.Tensor, values: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
batch_size, seq_length, _ = queries.shape
query_layer = self.query(queries).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = self.key(keys).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(values).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
context_layer = self.out_proj(context_layer)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
|
class ZoeDepthMultiheadAttention(nn.Module):
'''Equivalent implementation of nn.MultiheadAttention with `batch_first=True`.'''
def __init__(self, hidden_size, num_attention_heads, dropout):
pass
def forward(self, queries: torch.Tensor, keys: torch.Tensor, values: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 3 | 1 | 20 | 4 | 14 | 2 | 2 | 0.16 | 1 | 5 | 0 | 0 | 3 | 8 | 3 | 13 | 67 | 16 | 44 | 28 | 33 | 7 | 34 | 21 | 30 | 3 | 1 | 1 | 6 |
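The docstring's equivalence claim can be sanity-checked numerically by copying the layer's weights into a stock `nn.MultiheadAttention`. This sketch assumes the `ZoeDepthMultiheadAttention` class from the record above is in scope, and relies on the stock module splitting heads into contiguous chunks of the embedding, as the custom layer does:
```python
import torch
from torch import nn

hidden, heads = 32, 4
custom = ZoeDepthMultiheadAttention(hidden, heads, dropout=0.0).eval()
stock = nn.MultiheadAttention(hidden, heads, dropout=0.0, batch_first=True).eval()

with torch.no_grad():
    stock.in_proj_weight.copy_(torch.cat([custom.query.weight, custom.key.weight, custom.value.weight]))
    stock.in_proj_bias.copy_(torch.cat([custom.query.bias, custom.key.bias, custom.value.bias]))
    stock.out_proj.weight.copy_(custom.out_proj.weight)
    stock.out_proj.bias.copy_(custom.out_proj.bias)

x = torch.rand(2, 10, hidden)
ours = custom(x, x, x)[0]
theirs = stock(x, x, x, need_weights=False)[0]
assert torch.allclose(ours, theirs, atol=1e-6)
```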
6,384 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthMultipleMetricDepthEstimationHeads |
from torch import nn
import torch
class ZoeDepthMultipleMetricDepthEstimationHeads(nn.Module):
"""
Multiple metric depth estimation heads. An MLP classifier is used to route between two different heads.
"""
def __init__(self, config):
super().__init__()
bin_embedding_dim = config.bin_embedding_dim
n_attractors = config.num_attractors
self.bin_configurations = config.bin_configurations
self.bin_centers_type = config.bin_centers_type
bottleneck_features = config.bottleneck_features
self.conv2 = nn.Conv2d(bottleneck_features, bottleneck_features, kernel_size=1, stride=1, padding=0)
self.patch_transformer = ZoeDepthPatchTransformerEncoder(config)
self.mlp_classifier = ZoeDepthMLPClassifier(in_features=128, out_features=2)
if self.bin_centers_type == 'normed':
Attractor = ZoeDepthAttractorLayer
elif self.bin_centers_type == 'softplus':
Attractor = ZoeDepthAttractorLayerUnnormed
self.seed_bin_regressors = nn.ModuleDict({conf['name']: ZoeDepthSeedBinRegressor(config, n_bins=conf['n_bins'], mlp_dim=bin_embedding_dim // 2, min_depth=conf['min_depth'], max_depth=conf['max_depth']) for conf in config.bin_configurations})
self.seed_projector = ZoeDepthProjector(in_features=bottleneck_features, out_features=bin_embedding_dim, mlp_dim=bin_embedding_dim // 2)
self.projectors = nn.ModuleList([ZoeDepthProjector(in_features=config.fusion_hidden_size, out_features=bin_embedding_dim, mlp_dim=bin_embedding_dim // 2) for _ in range(4)])
self.attractors = nn.ModuleDict({configuration['name']: nn.ModuleList([Attractor(config, n_bins=n_attractors[i], min_depth=configuration['min_depth'], max_depth=configuration['max_depth']) for i in range(len(n_attractors))]) for configuration in config.bin_configurations})
last_in = config.num_relative_features
self.conditional_log_binomial = nn.ModuleDict({configuration['name']: ZoeDepthConditionalLogBinomialSoftmax(config, last_in, bin_embedding_dim, configuration['n_bins'], bottleneck_factor=4) for configuration in config.bin_configurations})
def forward(self, outconv_activation, bottleneck, feature_blocks, relative_depth):
x = self.conv2(bottleneck)
embedding = self.patch_transformer(x)[:, 0, :]
domain_logits = self.mlp_classifier(embedding)
domain_vote = torch.softmax(domain_logits.sum(dim=0, keepdim=True), dim=-1)
names = [configuration['name'] for configuration in self.bin_configurations]
bin_configurations_name = names[torch.argmax(domain_vote, dim=-1).squeeze().item()]
try:
conf = [config for config in self.bin_configurations if config['name'] == bin_configurations_name][0]
except IndexError:
raise ValueError(f'bin_configurations_name {bin_configurations_name} not found in bin_configurations')
min_depth = conf['min_depth']
max_depth = conf['max_depth']
seed_bin_regressor = self.seed_bin_regressors[bin_configurations_name]
_, seed_bin_centers = seed_bin_regressor(x)
if self.bin_centers_type in ['normed', 'hybrid2']:
prev_bin = (seed_bin_centers - min_depth) / (max_depth - min_depth)
else:
prev_bin = seed_bin_centers
prev_bin_embedding = self.seed_projector(x)
attractors = self.attractors[bin_configurations_name]
for projector, attractor, feature in zip(self.projectors, attractors, feature_blocks):
bin_embedding = projector(feature)
bin, bin_centers = attractor(bin_embedding, prev_bin, prev_bin_embedding, interpolate=True)
prev_bin = bin
prev_bin_embedding = bin_embedding
last = outconv_activation
bin_centers = nn.functional.interpolate(bin_centers, last.shape[-2:], mode='bilinear', align_corners=True)
bin_embedding = nn.functional.interpolate(bin_embedding, last.shape[-2:], mode='bilinear', align_corners=True)
conditional_log_binomial = self.conditional_log_binomial[bin_configurations_name]
x = conditional_log_binomial(last, bin_embedding)
out = torch.sum(x * bin_centers, dim=1, keepdim=True)
return (out, domain_logits)
|
class ZoeDepthMultipleMetricDepthEstimationHeads(nn.Module):
'''
Multiple metric depth estimation heads. An MLP classifier is used to route between two different heads.
'''
def __init__(self, config):
pass
def forward(self, outconv_activation, bottleneck, feature_blocks, relative_depth):
pass
| 3 | 1 | 67 | 10 | 51 | 7 | 4 | 0.16 | 1 | 12 | 7 | 0 | 2 | 10 | 2 | 12 | 139 | 21 | 102 | 39 | 99 | 16 | 52 | 38 | 49 | 4 | 1 | 1 | 7 |
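The routing at the top of the `forward` above picks one head configuration for the whole batch: per-image domain logits are summed, softmaxed into a single vote, and the argmax selects the configuration name. A tiny sketch (the names are stand-ins for the entries of `config.bin_configurations`):
```python
import torch

domain_logits = torch.tensor([[2.0, 0.5],   # image 1 leans towards the first domain
                              [1.5, 1.0],   # image 2 leans towards the first domain
                              [0.2, 2.5]])  # image 3 leans towards the second domain
names = ["nyu", "kitti"]  # stand-in configuration names
domain_vote = torch.softmax(domain_logits.sum(dim=0, keepdim=True), dim=-1)
chosen = names[torch.argmax(domain_vote, dim=-1).squeeze().item()]
print(domain_vote, chosen)  # column sums (3.7, 4.0) -> "kitti" wins narrowly
```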
6,385 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthNeck |
from .configuration_zoedepth import ZoeDepthConfig
import torch
from torch import nn
class ZoeDepthNeck(nn.Module):
"""
ZoeDepthNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as
input and produces another list of tensors as output. For ZoeDepth, it includes 2 stages:
* ZoeDepthReassembleStage
* ZoeDepthFeatureFusionStage.
Args:
config (`ZoeDepthConfig`): Model configuration class defining the model architecture.
"""
def __init__(self, config: ZoeDepthConfig):
super().__init__()
self.config = config
if config.backbone_config is not None and config.backbone_config.model_type in ['swinv2']:
self.reassemble_stage = None
else:
self.reassemble_stage = ZoeDepthReassembleStage(config)
self.convs = nn.ModuleList()
for channel in config.neck_hidden_sizes:
self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False))
self.fusion_stage = ZoeDepthFeatureFusionStage(config)
def forward(self, hidden_states: list[torch.Tensor], patch_height, patch_width) -> list[torch.Tensor]:
"""
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):
List of hidden states from the backbone.
"""
if not isinstance(hidden_states, (tuple, list)):
raise TypeError('hidden_states should be a tuple or list of tensors')
if len(hidden_states) != len(self.config.neck_hidden_sizes):
raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')
if self.reassemble_stage is not None:
hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)
features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]
output = self.fusion_stage(features)
return (output, features[-1])
|
class ZoeDepthNeck(nn.Module):
'''
ZoeDepthNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as
input and produces another list of tensors as output. For ZoeDepth, it includes 2 stages:
* ZoeDepthReassembleStage
* ZoeDepthFeatureFusionStage.
Args:
config (`ZoeDepthConfig`): Model configuration class defining the model architecture.
'''
def __init__(self, config: ZoeDepthConfig):
pass
def forward(self, hidden_states: list[torch.Tensor], patch_height, patch_width) -> list[torch.Tensor]:
'''
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):
List of hidden states from the backbone.
'''
pass
| 3 | 2 | 19 | 4 | 11 | 5 | 4 | 0.82 | 1 | 9 | 2 | 0 | 2 | 4 | 2 | 12 | 52 | 12 | 22 | 10 | 19 | 18 | 21 | 10 | 18 | 4 | 1 | 1 | 7 |
6,386 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthPatchTransformerEncoder |
import torch
from torch import nn
class ZoeDepthPatchTransformerEncoder(nn.Module):
def __init__(self, config):
"""ViT-like transformer block
Args:
config (`ZoeDepthConfig`):
Model configuration class defining the model architecture.
"""
super().__init__()
in_channels = config.bottleneck_features
self.transformer_encoder = nn.ModuleList([ZoeDepthTransformerEncoderLayer(config) for _ in range(config.num_patch_transformer_layers)])
self.embedding_convPxP = nn.Conv2d(in_channels, config.patch_transformer_hidden_size, kernel_size=1, stride=1, padding=0)
def positional_encoding_1d(self, batch_size, sequence_length, embedding_dim, device='cpu', dtype=torch.float32):
"""Generate positional encodings
Args:
sequence_length (int): Sequence length
embedding_dim (int): Embedding dimension
Returns:
torch.Tensor: Positional encodings.
"""
position = torch.arange(0, sequence_length, dtype=dtype, device=device).unsqueeze(1)
index = torch.arange(0, embedding_dim, 2, dtype=dtype, device=device).unsqueeze(0)
div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim))
pos_encoding = position * div_term
pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1)
pos_encoding = pos_encoding.unsqueeze(dim=0).repeat(batch_size, 1, 1)
return pos_encoding
def forward(self, x):
"""Forward pass
Args:
x (torch.Tensor - NCHW): Input feature tensor
Returns:
torch.Tensor - Transformer output embeddings of shape (batch_size, sequence_length, embedding_dim)
"""
embeddings = self.embedding_convPxP(x).flatten(2)
embeddings = nn.functional.pad(embeddings, (1, 0))
embeddings = embeddings.permute(0, 2, 1)
batch_size, sequence_length, embedding_dim = embeddings.shape
embeddings = embeddings + self.positional_encoding_1d(batch_size, sequence_length, embedding_dim, device=embeddings.device, dtype=embeddings.dtype)
for layer in self.transformer_encoder:
embeddings = layer(embeddings)
return embeddings
|
class ZoeDepthPatchTransformerEncoder(nn.Module):
def __init__(self, config):
'''ViT-like transformer block
Args:
config (`ZoeDepthConfig`):
Model configuration class defining the model architecture.
'''
pass
def positional_encoding_1d(self, batch_size, sequence_length, embedding_dim, device='cpu', dtype=torch.float32):
'''Generate positional encodings
Args:
sequence_length (int): Sequence length
embedding_dim (int): Embedding dimension
Returns:
torch.Tensor: Positional encodings.
'''
pass
def forward(self, x):
'''Forward pass
Args:
x (torch.Tensor - NCHW): Input feature tensor
Returns:
torch.Tensor - Transformer output embeddings of shape (batch_size, sequence_length, embedding_dim)
'''
pass
| 4 | 3 | 19 | 4 | 9 | 7 | 1 | 0.69 | 1 | 3 | 1 | 0 | 3 | 2 | 3 | 13 | 61 | 13 | 29 | 15 | 25 | 20 | 23 | 14 | 19 | 2 | 1 | 1 | 4 |
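`positional_encoding_1d` above builds the classic sinusoidal encoding but concatenates the sine and cosine halves along the channel axis instead of interleaving them. A standalone recomputation checked against the closed form sin(pos / 10000^(2i/d)):
```python
import torch

seq_len, dim = 6, 8
position = torch.arange(seq_len, dtype=torch.float32).unsqueeze(1)
index = torch.arange(0, dim, 2, dtype=torch.float32).unsqueeze(0)
div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0)) / dim))
encoding = torch.cat([torch.sin(position * div_term), torch.cos(position * div_term)], dim=1)

# channel j < dim/2 holds sin(pos / 10000^(2j/dim)); the second half holds cos
pos, j = 3, 1
expected = torch.sin(torch.tensor(pos / 10000 ** (2 * j / dim)))
assert torch.allclose(encoding[pos, j], expected)
assert encoding.shape == (seq_len, dim)
```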
6,387 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthPreActResidualLayer |
import torch
from torch import nn
class ZoeDepthPreActResidualLayer(nn.Module):
"""
ResidualConvUnit, pre-activate residual unit.
Args:
config (`[ZoeDepthConfig]`):
Model configuration class defining the model architecture.
"""
def __init__(self, config):
super().__init__()
self.use_batch_norm = config.use_batch_norm_in_fusion_residual
use_bias_in_fusion_residual = config.use_bias_in_fusion_residual if config.use_bias_in_fusion_residual is not None else not self.use_batch_norm
self.activation1 = nn.ReLU()
self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)
self.activation2 = nn.ReLU()
self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)
if self.use_batch_norm:
self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size, eps=config.batch_norm_eps)
self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size, eps=config.batch_norm_eps)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
residual = hidden_state
hidden_state = self.activation1(hidden_state)
hidden_state = self.convolution1(hidden_state)
if self.use_batch_norm:
hidden_state = self.batch_norm1(hidden_state)
hidden_state = self.activation2(hidden_state)
hidden_state = self.convolution2(hidden_state)
if self.use_batch_norm:
hidden_state = self.batch_norm2(hidden_state)
return hidden_state + residual
|
class ZoeDepthPreActResidualLayer(nn.Module):
'''
ResidualConvUnit, pre-activate residual unit.
Args:
config (`[ZoeDepthConfig]`):
Model configuration class defining the model architecture.
'''
def __init__(self, config):
pass
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
pass
| 3 | 1 | 25 | 5 | 20 | 0 | 3 | 0.17 | 1 | 2 | 0 | 0 | 2 | 7 | 2 | 12 | 60 | 12 | 41 | 12 | 38 | 7 | 23 | 12 | 20 | 3 | 1 | 1 | 6 |
6,388 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthPreTrainedModel |
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging
from .configuration_zoedepth import ZoeDepthConfig
from ...modeling_utils import PreTrainedModel
@auto_docstring
class ZoeDepthPreTrainedModel(PreTrainedModel):
config: ZoeDepthConfig
base_model_prefix = 'zoedepth'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class ZoeDepthPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3 | 1 | 11 | 0 | 8 | 3 | 4 | 0.54 | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 22 | 2 | 13 | 6 | 11 | 7 | 12 | 6 | 10 | 4 | 1 | 2 | 4 |
6,389 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthProjector |
import torch
from torch import nn
class ZoeDepthProjector(nn.Module):
def __init__(self, in_features, out_features, mlp_dim=128):
"""Projector MLP.
Args:
in_features (`int`):
Number of input channels.
out_features (`int`):
Number of output channels.
mlp_dim (`int`, *optional*, defaults to 128):
Hidden dimension.
"""
super().__init__()
self.conv1 = nn.Conv2d(in_features, mlp_dim, 1, 1, 0)
self.act = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(mlp_dim, out_features, 1, 1, 0)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.conv1(hidden_state)
hidden_state = self.act(hidden_state)
hidden_state = self.conv2(hidden_state)
return hidden_state
|
class ZoeDepthProjector(nn.Module):
def __init__(self, in_features, out_features, mlp_dim=128):
'''Projector MLP.
Args:
in_features (`int`):
Number of input channels.
out_features (`int`):
Number of output channels.
mlp_dim (`int`, *optional*, defaults to 128):
Hidden dimension.
'''
pass
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
pass
| 3 | 1 | 11 | 2 | 5 | 5 | 1 | 0.82 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 24 | 4 | 11 | 6 | 8 | 9 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
6,390 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthReassembleLayer |
from torch import nn
class ZoeDepthReassembleLayer(nn.Module):
def __init__(self, config, channels, factor):
super().__init__()
hidden_size = config.backbone_hidden_size
self.projection = nn.Conv2d(in_channels=hidden_size, out_channels=channels, kernel_size=1)
if factor > 1:
self.resize = nn.ConvTranspose2d(channels, channels, kernel_size=factor, stride=factor, padding=0)
elif factor == 1:
self.resize = nn.Identity()
elif factor < 1:
self.resize = nn.Conv2d(channels, channels, kernel_size=3, stride=int(1 / factor), padding=1)
def forward(self, hidden_state):
hidden_state = self.projection(hidden_state)
hidden_state = self.resize(hidden_state)
return hidden_state
|
class ZoeDepthReassembleLayer(nn.Module):
def __init__(self, config, channels, factor):
pass
def forward(self, hidden_state):
pass
| 3 | 0 | 9 | 1 | 7 | 2 | 3 | 0.27 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 21 | 2 | 15 | 6 | 12 | 4 | 13 | 6 | 10 | 4 | 1 | 1 | 5 |
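The `factor` argument above selects between upsampling, identity, and downsampling. A quick shape check with plain layers mirroring the three branches (the config object is bypassed to keep the sketch standalone):
```python
import torch
from torch import nn

channels = 48
x = torch.rand(1, channels, 24, 24)
up4 = nn.ConvTranspose2d(channels, channels, kernel_size=4, stride=4)      # factor = 4
same = nn.Identity()                                                       # factor = 1
down2 = nn.Conv2d(channels, channels, kernel_size=3, stride=2, padding=1)  # factor = 0.5

assert up4(x).shape == (1, channels, 96, 96)    # spatial size * 4
assert same(x).shape == (1, channels, 24, 24)
assert down2(x).shape == (1, channels, 12, 12)  # spatial size * 0.5
```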
6,391 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py | transformers.models.zoedepth.modeling_zoedepth.ZoeDepthReassembleStage |
from ...activations import ACT2FN
import torch
from torch import nn
class ZoeDepthReassembleStage(nn.Module):
"""
This class reassembles the hidden states of the backbone into image-like feature representations at various
resolutions.
This happens in 3 stages:
1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to
`config.readout_type`.
2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.
3. Resizing the spatial dimensions (height, width).
Args:
config (`[ZoeDepthConfig]`):
Model configuration class defining the model architecture.
"""
def __init__(self, config):
super().__init__()
self.readout_type = config.readout_type
self.layers = nn.ModuleList()
for neck_hidden_size, factor in zip(config.neck_hidden_sizes, config.reassemble_factors):
self.layers.append(ZoeDepthReassembleLayer(config, channels=neck_hidden_size, factor=factor))
if config.readout_type == 'project':
self.readout_projects = nn.ModuleList()
hidden_size = config.backbone_hidden_size
for _ in config.neck_hidden_sizes:
self.readout_projects.append(nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]))
def forward(self, hidden_states: list[torch.Tensor], patch_height, patch_width) -> list[torch.Tensor]:
"""
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):
List of hidden states from the backbone.
"""
batch_size = hidden_states[0].shape[0]
hidden_states = torch.cat(hidden_states, dim=0)
cls_token, hidden_states = (hidden_states[:, 0], hidden_states[:, 1:])
total_batch_size, sequence_length, num_channels = hidden_states.shape
hidden_states = hidden_states.reshape(total_batch_size, patch_height, patch_width, num_channels)
hidden_states = hidden_states.permute(0, 3, 1, 2).contiguous()
if self.readout_type == 'project':
hidden_states = hidden_states.flatten(2).permute((0, 2, 1))
readout = cls_token.unsqueeze(dim=1).expand_as(hidden_states)
hidden_states = torch.cat((hidden_states, readout), -1)
elif self.readout_type == 'add':
hidden_states = hidden_states + cls_token.unsqueeze(-1)
out = []
for stage_idx, hidden_state in enumerate(hidden_states.split(batch_size, dim=0)):
if self.readout_type == 'project':
hidden_state = self.readout_projects[stage_idx](hidden_state)
hidden_state = hidden_state.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width)
hidden_state = self.layers[stage_idx](hidden_state)
out.append(hidden_state)
return out
|
class ZoeDepthReassembleStage(nn.Module):
'''
This class reassembles the hidden states of the backbone into image-like feature representations at various
resolutions.
This happens in 3 stages:
1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to
`config.readout_type`.
2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.
    3. Resize the spatial dimensions (height, width).
    Args:
        config ([`ZoeDepthConfig`]):
Model configuration class defining the model architecture.
'''
def __init__(self, config):
pass
def forward(self, hidden_states: list[torch.Tensor], patch_height, patch_width) -> list[torch.Tensor]:
'''
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):
List of hidden states from the backbone.
'''
pass
| 3 | 2 | 28 | 5 | 17 | 6 | 5 | 0.69 | 1 | 5 | 1 | 0 | 2 | 3 | 2 | 12 | 72 | 13 | 35 | 15 | 32 | 24 | 32 | 15 | 29 | 5 | 1 | 2 | 9
|
6,392
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py
|
transformers.models.zoedepth.modeling_zoedepth.ZoeDepthRelativeDepthEstimationHead
|
from torch import nn
import torch
class ZoeDepthRelativeDepthEstimationHead(nn.Module):
"""
    Relative depth estimation head consisting of 3 convolutional layers. It progressively halves the feature dimension and
    upsamples the predictions to the input resolution after the first convolutional layer (details can be found in the
    supplementary material of the DPT paper).
"""
def __init__(self, config):
super().__init__()
self.head_in_index = config.head_in_index
self.projection = None
if config.add_projection:
self.projection = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
features = config.fusion_hidden_size
self.conv1 = nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv2 = nn.Conv2d(features // 2, config.num_relative_features, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(config.num_relative_features, 1, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor:
hidden_states = hidden_states[self.head_in_index]
if self.projection is not None:
hidden_states = self.projection(hidden_states)
hidden_states = nn.ReLU()(hidden_states)
hidden_states = self.conv1(hidden_states)
hidden_states = self.upsample(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = nn.ReLU()(hidden_states)
features = hidden_states
hidden_states = self.conv3(hidden_states)
hidden_states = nn.ReLU()(hidden_states)
predicted_depth = hidden_states.squeeze(dim=1)
return (predicted_depth, features)
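# Usage sketch for the head above (hypothetical config values), assuming the
# class definition is in scope; it shows the expected shape flow from a fused
# feature map to a single-channel relative-depth prediction.
import torch
from types import SimpleNamespace
config = SimpleNamespace(head_in_index=-1, add_projection=False,
                         fusion_hidden_size=64, num_relative_features=32)
head = ZoeDepthRelativeDepthEstimationHead(config)
hidden_states = [torch.randn(1, 64, 24, 24)]
depth, features = head(hidden_states)
print(depth.shape, features.shape)  # torch.Size([1, 48, 48]) torch.Size([1, 32, 48, 48])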
|
class ZoeDepthRelativeDepthEstimationHead(nn.Module):
'''
    Relative depth estimation head consisting of 3 convolutional layers. It progressively halves the feature dimension and
    upsamples the predictions to the input resolution after the first convolutional layer (details can be found in the
    supplementary material of the DPT paper).
'''
def __init__(self, config):
pass
def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor:
pass
| 3 | 1 | 17 | 4 | 13 | 1 | 2 | 0.27 | 1 | 2 | 0 | 0 | 2 | 6 | 2 | 12 | 42 | 9 | 26 | 12 | 23 | 7 | 26 | 12 | 23 | 2 | 1 | 1 | 4
|
6,393
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py
|
transformers.models.zoedepth.modeling_zoedepth.ZoeDepthSeedBinRegressor
|
from torch import nn
import torch
class ZoeDepthSeedBinRegressor(nn.Module):
def __init__(self, config, n_bins=16, mlp_dim=256, min_depth=0.001, max_depth=10):
"""Bin center regressor network.
Can be "normed" or "unnormed". If "normed", bin centers are bounded on the (min_depth, max_depth) interval.
Args:
            config (`ZoeDepthConfig`):
Model configuration.
n_bins (`int`, *optional*, defaults to 16):
Number of bin centers.
mlp_dim (`int`, *optional*, defaults to 256):
Hidden dimension.
min_depth (`float`, *optional*, defaults to 1e-3):
Min depth value.
max_depth (`float`, *optional*, defaults to 10):
Max depth value.
"""
super().__init__()
self.in_features = config.bottleneck_features
self.bin_centers_type = config.bin_centers_type
self.min_depth = min_depth
self.max_depth = max_depth
self.conv1 = nn.Conv2d(self.in_features, mlp_dim, 1, 1, 0)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(mlp_dim, n_bins, 1, 1, 0)
self.act2 = nn.ReLU(inplace=True) if self.bin_centers_type == 'normed' else nn.Softplus()
def forward(self, x):
"""
        Returns a tensor of bin-width vectors (centers), one vector per pixel.
"""
x = self.conv1(x)
x = self.act1(x)
x = self.conv2(x)
bin_centers = self.act2(x)
if self.bin_centers_type == 'normed':
bin_centers = bin_centers + 0.001
bin_widths_normed = bin_centers / bin_centers.sum(dim=1, keepdim=True)
bin_widths = (self.max_depth - self.min_depth) * bin_widths_normed
bin_widths = nn.functional.pad(bin_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth)
bin_edges = torch.cumsum(bin_widths, dim=1)
bin_centers = 0.5 * (bin_edges[:, :-1, ...] + bin_edges[:, 1:, ...])
return (bin_widths_normed, bin_centers)
else:
return (bin_centers, bin_centers)
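# Worked example of the 'normed' branch above, independent of the module:
# activations become widths that sum to (max_depth - min_depth), widths are
# cumulative-summed into edges starting at min_depth, and adjacent edges are
# averaged into centers that stay inside (min_depth, max_depth).
import torch
from torch import nn
min_depth, max_depth = 1e-3, 10.0
raw = torch.tensor([1.0, 2.0, 3.0, 4.0]).reshape(1, 4, 1, 1) + 1e-3
widths_normed = raw / raw.sum(dim=1, keepdim=True)
widths = (max_depth - min_depth) * widths_normed
widths = nn.functional.pad(widths, (0, 0, 0, 0, 1, 0), mode='constant', value=min_depth)
edges = torch.cumsum(widths, dim=1)                 # (1, 5, 1, 1), first edge == min_depth
centers = 0.5 * (edges[:, :-1] + edges[:, 1:])      # (1, 4, 1, 1)
print(edges.flatten().tolist())
print(centers.flatten().tolist())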
|
class ZoeDepthSeedBinRegressor(nn.Module):
def __init__(self, config, n_bins=16, mlp_dim=256, min_depth=0.001, max_depth=10):
'''Bin center regressor network.
Can be "normed" or "unnormed". If "normed", bin centers are bounded on the (min_depth, max_depth) interval.
Args:
            config (`ZoeDepthConfig`):
Model configuration.
n_bins (`int`, *optional*, defaults to 16):
Number of bin centers.
mlp_dim (`int`, *optional*, defaults to 256):
Hidden dimension.
min_depth (`float`, *optional*, defaults to 1e-3):
Min depth value.
max_depth (`float`, *optional*, defaults to 10):
Max depth value.
'''
pass
def forward(self, x):
'''
        Returns a tensor of bin-width vectors (centers), one vector per pixel.
'''
pass
| 3 | 2 | 26 | 4 | 13 | 10 | 2 | 0.77 | 1 | 1 | 0 | 0 | 2 | 8 | 2 | 12 | 54 | 8 | 26 | 15 | 23 | 20 | 25 | 15 | 22 | 2 | 1 | 1 | 4
|
6,394
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/zoedepth/modeling_zoedepth.py
|
transformers.models.zoedepth.modeling_zoedepth.ZoeDepthTransformerEncoderLayer
|
from ...activations import ACT2FN
from torch import nn
from typing import Optional, Union
import torch
class ZoeDepthTransformerEncoderLayer(nn.Module):
def __init__(self, config, dropout=0.1, activation='relu'):
super().__init__()
hidden_size = config.patch_transformer_hidden_size
intermediate_size = config.patch_transformer_intermediate_size
num_attention_heads = config.patch_transformer_num_attention_heads
self.self_attn = ZoeDepthMultiheadAttention(hidden_size, num_attention_heads, dropout=dropout)
self.linear1 = nn.Linear(hidden_size, intermediate_size)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(intermediate_size, hidden_size)
self.norm1 = nn.LayerNorm(hidden_size)
self.norm2 = nn.LayerNorm(hidden_size)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = ACT2FN[activation]
def forward(self, src, src_mask: Optional[torch.Tensor]=None):
queries = keys = src
src2 = self.self_attn(queries=queries, keys=keys, values=src, attention_mask=src_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
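# The layer above is a standard post-norm transformer encoder block. A
# minimal equivalent sketch with hypothetical sizes, using torch's built-in
# attention in place of the record's ZoeDepthMultiheadAttention:
import torch
from torch import nn
hidden, heads, intermediate = 64, 4, 128
attn = nn.MultiheadAttention(hidden, heads, dropout=0.1, batch_first=True)
linear1, linear2 = nn.Linear(hidden, intermediate), nn.Linear(intermediate, hidden)
norm1, norm2 = nn.LayerNorm(hidden), nn.LayerNorm(hidden)
src = torch.randn(2, 10, hidden)
src = norm1(src + attn(src, src, src)[0])               # self-attention + residual, then norm
src = norm2(src + linear2(torch.relu(linear1(src))))    # feed-forward + residual, then norm
print(src.shape)                                        # torch.Size([2, 10, 64])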
|
class ZoeDepthTransformerEncoderLayer(nn.Module):
def __init__(self, config, dropout=0.1, activation='relu'):
pass
def forward(self, src, src_mask: Optional[torch.Tensor]=None):
pass
| 3 | 0 | 16 | 3 | 14 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 2 | 9 | 2 | 12 | 34 | 6 | 28 | 21 | 21 | 0 | 24 | 17 | 21 | 1 | 1 | 0 | 2
|
6,395
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/onnx/config.py
|
transformers.onnx.config.OnnxConfig
|
from collections import OrderedDict
import numpy as np
from typing import TYPE_CHECKING, Any, Callable, Optional, Union
import copy
from .utils import ParameterFormat, compute_effective_axis_dimension, compute_serialized_parameters_size
import dataclasses
import warnings
from packaging import version
from collections.abc import Iterable, Mapping
from abc import ABC, abstractmethod
from ..utils import is_torch_available, is_vision_available, logging
class OnnxConfig(ABC):
"""
Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format.
"""
default_fixed_batch = 2
default_fixed_sequence = 8
default_fixed_num_choices = 4
torch_onnx_minimum_version = version.parse('1.8')
_tasks_to_common_outputs = {'causal-lm': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}}), 'default': OrderedDict({'last_hidden_state': {0: 'batch', 1: 'sequence'}}), 'image-classification': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}}), 'image-segmentation': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}, 'pred_boxes': {0: 'batch', 1: 'sequence'}, 'pred_masks': {0: 'batch', 1: 'sequence'}}), 'masked-im': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}}), 'masked-lm': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}}), 'multiple-choice': OrderedDict({'logits': {0: 'batch'}}), 'object-detection': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}, 'pred_boxes': {0: 'batch', 1: 'sequence'}}), 'question-answering': OrderedDict({'start_logits': {0: 'batch', 1: 'sequence'}, 'end_logits': {0: 'batch', 1: 'sequence'}}), 'semantic-segmentation': OrderedDict({'logits': {0: 'batch', 1: 'num_labels', 2: 'height', 3: 'width'}}), 'seq2seq-lm': OrderedDict({'logits': {0: 'batch', 1: 'decoder_sequence'}}), 'sequence-classification': OrderedDict({'logits': {0: 'batch'}}), 'token-classification': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}}), 'vision2seq-lm': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}}), 'speech2seq-lm': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}})}
def __init__(self, config: 'PretrainedConfig', task: str='default', patching_specs: Optional[list[PatchingSpec]]=None):
self._config = config
if task not in self._tasks_to_common_outputs:
raise ValueError(f'{task} is not a supported task, supported tasks: {self._tasks_to_common_outputs.keys()}')
self.task = task
self._patching_specs = []
for spec in patching_specs if patching_specs is not None else []:
final_spec = spec
if spec.orig_op is None:
final_spec = dataclasses.replace(spec, orig_op=getattr(spec.o, spec.name))
self._patching_specs.append(final_spec)
@classmethod
def from_model_config(cls, config: 'PretrainedConfig', task: str='default') -> 'OnnxConfig':
"""
Instantiate a OnnxConfig for a specific model
Args:
config: The model's configuration to use when exporting to ONNX
Returns:
OnnxConfig for this model
"""
return cls(config, task=task)
@property
@abstractmethod
def inputs(self) -> Mapping[str, Mapping[int, str]]:
"""
Mapping containing the axis definition of the input tensors to provide to the model
Returns:
For each input: its name associated to the axes symbolic name and the axis position within the tensor
"""
raise NotImplementedError()
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
"""
Mapping containing the axis definition of the output tensors to provide to the model
Returns:
For each output: its name associated to the axes symbolic name and the axis position within the tensor
"""
common_outputs = self._tasks_to_common_outputs[self.task]
return copy.deepcopy(common_outputs)
@property
def values_override(self) -> Optional[Mapping[str, Any]]:
"""
Dictionary of keys to override in the model's config before exporting
Returns:
Dictionary with the keys (and their corresponding values) to override
"""
if hasattr(self._config, 'use_cache'):
return {'use_cache': False}
return None
@property
def default_batch_size(self) -> int:
"""
The default batch size to use if no other indication
Returns:
Integer > 0
"""
return OnnxConfig.default_fixed_batch
@property
def default_sequence_length(self) -> int:
"""
The default sequence length to use if no other indication
Returns:
Integer > 0
"""
return OnnxConfig.default_fixed_sequence
@property
def default_num_choices(self) -> int:
"""
The default number of choices to use if no other indication
Returns:
Integer > 0
"""
return OnnxConfig.default_fixed_num_choices
@property
def default_onnx_opset(self) -> int:
"""
Which onnx opset to use when exporting the model
Returns:
Integer ONNX Opset version
"""
return DEFAULT_ONNX_OPSET
@property
def atol_for_validation(self) -> float:
"""
What absolute tolerance value to use during model conversion validation.
Returns:
Float absolute tolerance value.
"""
return 1e-05
@property
def is_torch_support_available(self) -> bool:
"""
The minimum PyTorch version required to export the model.
Returns:
`bool`: Whether the installed version of PyTorch is compatible with the model.
"""
if is_torch_available():
from transformers.utils import get_torch_version
return version.parse(get_torch_version()) >= self.torch_onnx_minimum_version
else:
return False
@staticmethod
def use_external_data_format(num_parameters: int) -> bool:
"""
Flag indicating if the model requires using external data format
Args:
            num_parameters: Number of parameters in the model
        Returns:
            True if model.num_parameters() * size_of(float32) >= 2 GB, False otherwise
"""
return compute_serialized_parameters_size(num_parameters, ParameterFormat.Float) >= EXTERNAL_DATA_FORMAT_SIZE_LIMIT
def _generate_dummy_images(self, batch_size: int=2, num_channels: int=3, image_height: int=40, image_width: int=40):
images = []
for _ in range(batch_size):
data = np.random.rand(image_height, image_width, num_channels) * 255
images.append(Image.fromarray(data.astype('uint8')).convert('RGB'))
return images
def _generate_dummy_audio(self, batch_size: int=2, sampling_rate: int=22050, time_duration: float=5.0, frequency: int=220):
audio_data = []
for _ in range(batch_size):
t = np.linspace(0, time_duration, int(time_duration * sampling_rate), endpoint=False)
audio_data.append(0.5 * np.sin(2 * np.pi * frequency * t))
return audio_data
def generate_dummy_inputs(self, preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin', 'ImageProcessingMixin'], batch_size: int=-1, seq_length: int=-1, num_choices: int=-1, is_pair: bool=False, num_channels: int=3, image_width: int=40, image_height: int=40, sampling_rate: int=22050, time_duration: float=5.0, frequency: int=220, tokenizer: Optional['PreTrainedTokenizerBase']=None) -> Mapping[str, Any]:
"""
Generate inputs to provide to the ONNX exporter
Args:
preprocessor: ([`PreTrainedTokenizerBase`], [`FeatureExtractionMixin`], or [`ImageProcessingMixin`]):
The preprocessor associated with this model configuration.
batch_size (`int`, *optional*, defaults to -1):
The batch size to export the model for (-1 means dynamic axis).
num_choices (`int`, *optional*, defaults to -1):
The number of candidate answers provided for multiple choice task (-1 means dynamic axis).
seq_length (`int`, *optional*, defaults to -1):
The sequence length to export the model for (-1 means dynamic axis).
is_pair (`bool`, *optional*, defaults to `False`):
Indicate if the input is a pair (sentence 1, sentence 2)
num_channels (`int`, *optional*, defaults to 3):
The number of channels of the generated images.
image_width (`int`, *optional*, defaults to 40):
The width of the generated images.
image_height (`int`, *optional*, defaults to 40):
The height of the generated images.
            sampling_rate (`int`, *optional*, defaults to 22050):
                The sampling rate for audio data generation.
            time_duration (`float`, *optional*, defaults to 5.0):
                Total seconds of sampling for audio data generation.
            frequency (`int`, *optional*, defaults to 220):
                The desired natural frequency of generated audio.
Returns:
Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
"""
from ..feature_extraction_utils import FeatureExtractionMixin
from ..image_processing_utils import ImageProcessingMixin
from ..tokenization_utils_base import PreTrainedTokenizerBase
if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None:
raise ValueError('You cannot provide both a tokenizer and a preprocessor to generate dummy inputs.')
if tokenizer is not None:
warnings.warn('The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use `preprocessor` instead.', FutureWarning)
logger.warning('Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.')
preprocessor = tokenizer
if isinstance(preprocessor, PreTrainedTokenizerBase):
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
input_token = preprocessor.unk_token if preprocessor.unk_token is not None and len(preprocessor.unk_token) > 0 else '0'
dummy_input = [' '.join([input_token]) * seq_length] * batch_size
if self.task == 'multiple-choice':
num_choices = compute_effective_axis_dimension(num_choices, fixed_dimension=OnnxConfig.default_fixed_num_choices, num_token_to_add=0)
dummy_input = dummy_input * num_choices
tokenized_input = preprocessor(dummy_input, text_pair=dummy_input)
for k, v in tokenized_input.items():
tokenized_input[k] = [v[i:i + num_choices] for i in range(0, len(v), num_choices)]
return dict(tokenized_input.convert_to_tensors(tensor_type='pt'))
return dict(preprocessor(dummy_input, return_tensors='pt'))
elif isinstance(preprocessor, ImageProcessingMixin):
if preprocessor.model_input_names[0] != 'pixel_values':
raise ValueError(f'The `preprocessor` is an image processor ({preprocessor.__class__.__name__}) and expects `model_input_names[0]` to be "pixel_values", but got {preprocessor.model_input_names[0]}')
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
return dict(preprocessor(images=dummy_input, return_tensors='pt'))
elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == 'pixel_values':
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
return dict(preprocessor(images=dummy_input, return_tensors='pt'))
elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == 'input_features':
batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
dummy_input = self._generate_dummy_audio(batch_size, sampling_rate, time_duration, frequency)
return dict(preprocessor(dummy_input, return_tensors='pt'))
else:
raise ValueError('Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.')
def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Generate inputs for ONNX Runtime using the reference model inputs. Override this to run inference with seq2seq
models which have the encoder and decoder exported as separate ONNX files.
Args:
            reference_model_inputs (`Mapping[str, Tensor]`):
Reference inputs for the model.
Returns:
`Mapping[str, Tensor]`: The mapping holding the kwargs to provide to the model's forward function
"""
return reference_model_inputs
def patch_ops(self):
for spec in self._patching_specs:
custom_op = spec.custom_op if spec.op_wrapper is None else spec.op_wrapper(spec.custom_op)
setattr(spec.o, spec.name, custom_op)
def restore_ops(self):
for spec in self._patching_specs:
orig_op = spec.orig_op if spec.op_wrapper is None else spec.op_wrapper(spec.orig_op)
setattr(spec.o, spec.name, orig_op)
@classmethod
def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> dict[str, Any]:
"""
Flatten any potential nested structure expanding the name of the field with the index of the element within the
structure.
Args:
name: The name of the nested structure
field: The structure to, potentially, be flattened
Returns:
(dict[str, Any]): Outputs with flattened structure and key mapping this new structure.
"""
from itertools import chain
return {f'{name}.{idx}': item for idx, item in enumerate(chain.from_iterable(field))}
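# OnnxConfig is abstract: a concrete export config only has to declare its
# dynamic input axes. A minimal hypothetical subclass as a sketch, assuming
# the OnnxConfig class above is in scope:
from collections import OrderedDict
from collections.abc import Mapping

class MyTextModelOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('input_ids', {0: 'batch', 1: 'sequence'}),
                ('attention_mask', {0: 'batch', 1: 'sequence'}),
            ]
        )

# onnx_config = MyTextModelOnnxConfig.from_model_config(model_config)
# dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer)  # given a tokenizer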
|
class OnnxConfig(ABC):
'''
Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format.
'''
def __init__(self, config: 'PretrainedConfig', task: str='default', patching_specs: Optional[list[PatchingSpec]]=None):
pass
@classmethod
def from_model_config(cls, config: 'PretrainedConfig', task: str='default') -> 'OnnxConfig':
'''
        Instantiate an OnnxConfig for a specific model
Args:
config: The model's configuration to use when exporting to ONNX
Returns:
OnnxConfig for this model
'''
pass
@property
@abstractmethod
def inputs(self) -> Mapping[str, Mapping[int, str]]:
'''
Mapping containing the axis definition of the input tensors to provide to the model
Returns:
For each input: its name associated to the axes symbolic name and the axis position within the tensor
'''
pass
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
'''
Mapping containing the axis definition of the output tensors to provide to the model
Returns:
For each output: its name associated to the axes symbolic name and the axis position within the tensor
'''
pass
@property
def values_override(self) -> Optional[Mapping[str, Any]]:
'''
Dictionary of keys to override in the model's config before exporting
Returns:
Dictionary with the keys (and their corresponding values) to override
'''
pass
@property
def default_batch_size(self) -> int:
'''
The default batch size to use if no other indication
Returns:
Integer > 0
'''
pass
@property
def default_sequence_length(self) -> int:
'''
The default sequence length to use if no other indication
Returns:
Integer > 0
'''
pass
@property
def default_num_choices(self) -> int:
'''
The default number of choices to use if no other indication
Returns:
Integer > 0
'''
pass
@property
def default_onnx_opset(self) -> int:
'''
Which onnx opset to use when exporting the model
Returns:
Integer ONNX Opset version
'''
pass
@property
def atol_for_validation(self) -> float:
'''
What absolute tolerance value to use during model conversion validation.
Returns:
Float absolute tolerance value.
'''
pass
@property
def is_torch_support_available(self) -> bool:
'''
The minimum PyTorch version required to export the model.
Returns:
`bool`: Whether the installed version of PyTorch is compatible with the model.
'''
pass
@staticmethod
def use_external_data_format(num_parameters: int) -> bool:
'''
Flag indicating if the model requires using external data format
Args:
            num_parameters: Number of parameters in the model
        Returns:
            True if model.num_parameters() * size_of(float32) >= 2 GB, False otherwise
'''
pass
def _generate_dummy_images(self, batch_size: int=2, num_channels: int=3, image_height: int=40, image_width: int=40):
pass
def _generate_dummy_audio(self, batch_size: int=2, sampling_rate: int=22050, time_duration: float=5.0, frequency: int=220):
pass
def generate_dummy_inputs(self, preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin', 'ImageProcessingMixin'], batch_size: int=-1, seq_length: int=-1, num_choices: int=-1, is_pair: bool=False, num_channels: int=3, image_width: int=40, image_height: int=40, sampling_rate: int=22050, time_duration: float=5.0, frequency: int=220, tokenizer: Optional['PreTrainedTokenizerBase']=None) -> Mapping[str, Any]:
'''
Generate inputs to provide to the ONNX exporter
Args:
preprocessor: ([`PreTrainedTokenizerBase`], [`FeatureExtractionMixin`], or [`ImageProcessingMixin`]):
The preprocessor associated with this model configuration.
batch_size (`int`, *optional*, defaults to -1):
The batch size to export the model for (-1 means dynamic axis).
num_choices (`int`, *optional*, defaults to -1):
The number of candidate answers provided for multiple choice task (-1 means dynamic axis).
seq_length (`int`, *optional*, defaults to -1):
The sequence length to export the model for (-1 means dynamic axis).
is_pair (`bool`, *optional*, defaults to `False`):
Indicate if the input is a pair (sentence 1, sentence 2)
num_channels (`int`, *optional*, defaults to 3):
The number of channels of the generated images.
image_width (`int`, *optional*, defaults to 40):
The width of the generated images.
image_height (`int`, *optional*, defaults to 40):
The height of the generated images.
            sampling_rate (`int`, *optional*, defaults to 22050):
                The sampling rate for audio data generation.
            time_duration (`float`, *optional*, defaults to 5.0):
                Total seconds of sampling for audio data generation.
            frequency (`int`, *optional*, defaults to 220):
                The desired natural frequency of generated audio.
Returns:
Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
'''
pass
def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]:
'''
Generate inputs for ONNX Runtime using the reference model inputs. Override this to run inference with seq2seq
models which have the encoder and decoder exported as separate ONNX files.
Args:
            reference_model_inputs (`Mapping[str, Tensor]`):
Reference inputs for the model.
Returns:
`Mapping[str, Tensor]`: The mapping holding the kwargs to provide to the model's forward function
'''
pass
def patch_ops(self):
pass
def restore_ops(self):
pass
@classmethod
def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> dict[str, Any]:
'''
Flatten any potential nested structure expanding the name of the field with the index of the element within the
structure.
Args:
name: The name of the nested structure
field: The structure to, potentially, be flattened
Returns:
(dict[str, Any]): Outputs with flattened structure and key mapping this new structure.
'''
pass
| 33 | 15 | 16 | 2 | 8 | 6 | 2 | 0.61 | 1 | 17 | 5 | 2 | 16 | 3 | 19 | 39 | 373 | 49 | 201 | 82 | 144 | 123 | 106 | 51 | 81 | 11 | 4 | 3 | 41
|
6,396
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/onnx/config.py
|
transformers.onnx.config.OnnxConfigWithPast
|
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Callable, Optional, Union
from collections.abc import Iterable, Mapping
from ..utils import is_torch_available, is_vision_available, logging
class OnnxConfigWithPast(OnnxConfig, ABC):
def __init__(self, config: 'PretrainedConfig', task: str='default', patching_specs: Optional[list[PatchingSpec]]=None, use_past: bool=False):
super().__init__(config, task=task, patching_specs=patching_specs)
self.use_past = use_past
@classmethod
def with_past(cls, config: 'PretrainedConfig', task: str='default') -> 'OnnxConfigWithPast':
"""
        Instantiate an OnnxConfig with the `use_past` attribute set to True
Args:
config: The underlying model's config to use when exporting to ONNX
Returns:
OnnxConfig with `.use_past = True`
"""
return cls(config, task=task, use_past=True)
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
common_outputs = super().outputs
if self.use_past:
self.fill_with_past_key_values_(common_outputs, direction='outputs')
return common_outputs
@property
def values_override(self) -> Optional[Mapping[str, Any]]:
if hasattr(self._config, 'use_cache'):
return {'use_cache': self.use_past}
return None
@property
def num_layers(self) -> int:
"""
The number of layers attribute retrieved from the model config. Override this for model configs where the
number of layers attribute is not called `num_layers`.
"""
if not hasattr(self._config, 'num_layers'):
raise AttributeError('could not find the number of layers attribute in the model configuration, override the num_layers property of the model OnnxConfig to solve this')
return self._config.num_layers
@property
def num_attention_heads(self) -> int:
"""
The number of attention heads attribute retrieved from the model config. Override this for model configs where
the number of attention heads attribute is not called `num_attention_heads`.
"""
if not hasattr(self._config, 'num_attention_heads'):
raise AttributeError('could not find the number of attention heads attribute in the model configuration, override the num_attention_heads property of the model OnnxConfig to solve this')
return self._config.num_attention_heads
def generate_dummy_inputs(self, tokenizer: 'PreTrainedTokenizerBase', batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
common_inputs = super().generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
batch, seqlen = common_inputs['input_ids'].shape
past_key_values_length = seqlen + 2
shape = (batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads)
if 'attention_mask' in common_inputs:
mask_dtype = common_inputs['attention_mask'].dtype
common_inputs['attention_mask'] = torch.cat([common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
common_inputs['past_key_values'] = []
for _ in range(self.num_layers):
common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
return common_inputs
def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str, inverted_values_shape: bool=False):
"""
        Fill the `inputs_or_outputs` mapping with past_key_values dynamic axes, depending on the direction.
        Args:
            inputs_or_outputs: The mapping to fill.
            direction: Either "inputs" or "outputs"; it specifies whether `inputs_or_outputs` is the input mapping or
                the output mapping, which matters for axes naming.
inverted_values_shape:
If `True`, store values on dynamic axis 1, else on axis 2.
"""
if direction not in ['inputs', 'outputs']:
raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
name = 'past_key_values' if direction == 'inputs' else 'present'
for i in range(self.num_layers):
inputs_or_outputs[f'{name}.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
if inverted_values_shape:
inputs_or_outputs[f'{name}.{i}.value'] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
inputs_or_outputs[f'{name}.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
def _flatten_past_key_values_(self, flattened_output, name, idx, t):
flattened_output[f'{name}.{idx}.key'] = t[0]
flattened_output[f'{name}.{idx}.value'] = t[1]
def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> dict[str, Any]:
flattened_output = {}
if name in ['present', 'past_key_values']:
for idx, t in enumerate(field):
self._flatten_past_key_values_(flattened_output, name, idx, t)
else:
flattened_output = super().flatten_output_collection_property(name, field)
return flattened_output
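# Sketch of what fill_with_past_key_values_ produces for a hypothetical
# 2-layer decoder-only model with direction='inputs' (plain Python, no model
# needed): each layer contributes a key and a value entry with a dynamic
# batch axis and a dynamic past-plus-current sequence axis.
inputs = {'input_ids': {0: 'batch', 1: 'sequence'}}
for i in range(2):
    inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
    inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
print(inputs)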
|
class OnnxConfigWithPast(OnnxConfig, ABC):
def __init__(self, config: 'PretrainedConfig', task: str='default', patching_specs: Optional[list[PatchingSpec]]=None, use_past: bool=False):
pass
@classmethod
def with_past(cls, config: 'PretrainedConfig', task: str='default') -> 'OnnxConfigWithPast':
'''
        Instantiate an OnnxConfig with the `use_past` attribute set to True
Args:
config: The underlying model's config to use when exporting to ONNX
Returns:
OnnxConfig with `.use_past = True`
'''
pass
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def values_override(self) -> Optional[Mapping[str, Any]]:
pass
@property
def num_layers(self) -> int:
'''
The number of layers attribute retrieved from the model config. Override this for model configs where the
number of layers attribute is not called `num_layers`.
'''
pass
@property
def num_attention_heads(self) -> int:
'''
The number of attention heads attribute retrieved from the model config. Override this for model configs where
the number of attention heads attribute is not called `num_attention_heads`.
'''
pass
def generate_dummy_inputs(self, tokenizer: 'PreTrainedTokenizerBase', batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
pass
def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str, inverted_values_shape: bool=False):
'''
        Fill the `inputs_or_outputs` mapping with past_key_values dynamic axes, depending on the direction.
        Args:
            inputs_or_outputs: The mapping to fill.
            direction: Either "inputs" or "outputs"; it specifies whether `inputs_or_outputs` is the input mapping or
                the output mapping, which matters for axes naming.
inverted_values_shape:
If `True`, store values on dynamic axis 1, else on axis 2.
'''
pass
def _flatten_past_key_values_(self, flattened_output, name, idx, t):
pass
def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> dict[str, Any]:
pass
| 16 | 4 | 13 | 1 | 9 | 3 | 2 | 0.27 | 2 | 10 | 1 | 1 | 9 | 1 | 10 | 49 | 145 | 22 | 97 | 44 | 65 | 26 | 58 | 24 | 46 | 5 | 5 | 2 | 24
|
6,397
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/onnx/config.py
|
transformers.onnx.config.OnnxSeq2SeqConfigWithPast
|
from collections.abc import Iterable, Mapping
from typing import TYPE_CHECKING, Any, Callable, Optional, Union
from ..utils import is_torch_available, is_vision_available, logging
class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast):
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
common_outputs = super(OnnxConfigWithPast, self).outputs
for name, axes_names in common_outputs.items():
sequence_name = 'encoder_sequence' if 'encoder' in name else 'decoder_sequence'
for axis_idx, name in axes_names.items():
if 'sequence' in name:
axes_names[axis_idx] = sequence_name
else:
axes_names[axis_idx] = name
if self.use_past:
self.fill_with_past_key_values_(common_outputs, direction='outputs')
return common_outputs
@property
def num_layers(self) -> tuple[int]:
try:
num_layers = super().num_layers
num_layers = (num_layers, num_layers)
except AttributeError:
if hasattr(self._config, 'encoder_layers') and hasattr(self._config, 'decoder_layers'):
num_layers = (self._config.encoder_layers, self._config.decoder_layers)
else:
raise AttributeError('could not find the number of encoder and decoder layers attributes in the model configuration, override the num_layers property of the model OnnxConfig to solve this')
return num_layers
@property
def num_attention_heads(self) -> tuple[int]:
try:
num_attention_heads = super().num_attention_heads
num_attention_heads = (num_attention_heads, num_attention_heads)
except AttributeError:
if hasattr(self._config, 'encoder_attention_heads') and hasattr(self._config, 'decoder_attention_heads'):
num_attention_heads = (self._config.encoder_attention_heads, self._config.decoder_attention_heads)
else:
raise AttributeError('could not find the number of attention heads for the encoder and the decoder attributes in the model configuration, override the num_attention_heads property of the model OnnxConfig to solve this')
return num_attention_heads
def generate_dummy_inputs(self, tokenizer: Optional['PreTrainedTokenizerBase'], batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
encoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair)
decoder_seq_length = seq_length if not self.use_past else 1
decoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=decoder_seq_length, is_pair=is_pair)
decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
common_inputs = dict(**encoder_inputs, **decoder_inputs)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
batch = common_inputs['input_ids'].shape[0]
encoder_seq_length = common_inputs['input_ids'].shape[1]
decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads)
decoder_shape = (batch, num_decoder_attention_heads, decoder_seq_length + 3, self._config.hidden_size // num_decoder_attention_heads)
common_inputs['past_key_values'] = []
num_encoder_layers, num_decoder_layers = self.num_layers
min_num_layers = min(num_encoder_layers, num_decoder_layers)
max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(min_num_layers):
common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(min_num_layers, max_num_layers):
common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
return common_inputs
def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
if direction not in ['inputs', 'outputs']:
raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
name = 'past_key_values' if direction == 'inputs' else 'present'
num_encoder_layers, num_decoder_layers = self.num_layers
min_num_layers = min(num_encoder_layers, num_decoder_layers)
max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
encoder_sequence = 'past_encoder_sequence'
decoder_sequence = 'past_decoder_sequence' if direction == 'inputs' else 'past_decoder_sequence + sequence'
for i in range(min_num_layers):
inputs_or_outputs[f'{name}.{i}.decoder.key'] = {0: 'batch', 2: decoder_sequence}
inputs_or_outputs[f'{name}.{i}.decoder.value'] = {0: 'batch', 2: decoder_sequence}
inputs_or_outputs[f'{name}.{i}.encoder.key'] = {0: 'batch', 2: encoder_sequence}
inputs_or_outputs[f'{name}.{i}.encoder.value'] = {0: 'batch', 2: encoder_sequence}
for i in range(min_num_layers, max_num_layers):
if remaining_side_name == 'encoder':
axes_info = {0: 'batch', 2: encoder_sequence}
else:
axes_info = {0: 'batch', 2: decoder_sequence}
inputs_or_outputs[f'{name}.{i}.{remaining_side_name}.key'] = axes_info
def _flatten_past_key_values_(self, flattened_output, name, idx, t):
flattened_output[f'{name}.{idx}.decoder.key'] = t[0]
flattened_output[f'{name}.{idx}.decoder.value'] = t[1]
flattened_output[f'{name}.{idx}.encoder.key'] = t[2]
flattened_output[f'{name}.{idx}.encoder.value'] = t[3]
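# Shape sketch for the seq2seq past cache built above (hypothetical sizes):
# each of the min(num_encoder_layers, num_decoder_layers) entries is a
# 4-tuple of (decoder key, decoder value, encoder key, encoder value).
import torch
batch, heads, hidden, enc_len, dec_len = 2, 4, 64, 8, 1
encoder_shape = (batch, heads, enc_len, hidden // heads)
decoder_shape = (batch, heads, dec_len + 3, hidden // heads)
past = [(torch.zeros(decoder_shape), torch.zeros(decoder_shape),
         torch.zeros(encoder_shape), torch.zeros(encoder_shape))
        for _ in range(2)]
print(len(past), past[0][0].shape, past[0][2].shape)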
|
class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast):
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def num_layers(self) -> tuple[int]:
pass
@property
def num_attention_heads(self) -> tuple[int]:
pass
def generate_dummy_inputs(self, tokenizer: Optional['PreTrainedTokenizerBase'], batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
pass
def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
pass
def _flatten_past_key_values_(self, flattened_output, name, idx, t):
pass
| 10 | 0 | 24 | 2 | 20 | 2 | 5 | 0.07 | 1 | 9 | 0 | 0 | 6 | 0 | 6 | 55 | 152 | 18 | 125 | 48 | 107 | 9 | 82 | 38 | 74 | 8 | 6 | 3 | 29
|
6,398
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/onnx/features.py
|
transformers.onnx.features.FeaturesManager
|
from functools import partial, reduce
from .. import PretrainedConfig, is_torch_available
from .config import OnnxConfig
from typing import TYPE_CHECKING, Callable, Optional
class FeaturesManager:
_TASKS_TO_AUTOMODELS = {}
if is_torch_available():
_TASKS_TO_AUTOMODELS = {'default': AutoModel, 'masked-lm': AutoModelForMaskedLM, 'causal-lm': AutoModelForCausalLM, 'seq2seq-lm': AutoModelForSeq2SeqLM, 'sequence-classification': AutoModelForSequenceClassification, 'token-classification': AutoModelForTokenClassification, 'multiple-choice': AutoModelForMultipleChoice, 'object-detection': AutoModelForObjectDetection, 'question-answering': AutoModelForQuestionAnswering, 'image-classification': AutoModelForImageClassification, 'image-segmentation': AutoModelForImageSegmentation, 'masked-im': AutoModelForMaskedImageModeling, 'semantic-segmentation': AutoModelForSemanticSegmentation, 'vision2seq-lm': AutoModelForVision2Seq, 'speech2seq-lm': AutoModelForSpeechSeq2Seq}
_SUPPORTED_MODEL_TYPE = {'albert': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.albert.AlbertOnnxConfig'), 'bart': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', 'sequence-classification', 'question-answering', onnx_config_cls='models.bart.BartOnnxConfig'), 'beit': supported_features_mapping('default', 'image-classification', onnx_config_cls='models.beit.BeitOnnxConfig'), 'bert': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.bert.BertOnnxConfig'), 'big-bird': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.big_bird.BigBirdOnnxConfig'), 'bigbird-pegasus': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', 'sequence-classification', 'question-answering', onnx_config_cls='models.bigbird_pegasus.BigBirdPegasusOnnxConfig'), 'blenderbot': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', onnx_config_cls='models.blenderbot.BlenderbotOnnxConfig'), 'blenderbot-small': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', onnx_config_cls='models.blenderbot_small.BlenderbotSmallOnnxConfig'), 'bloom': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'sequence-classification', 'token-classification', onnx_config_cls='models.bloom.BloomOnnxConfig'), 'camembert': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.camembert.CamembertOnnxConfig'), 'clip': supported_features_mapping('default', onnx_config_cls='models.clip.CLIPOnnxConfig'), 'codegen': supported_features_mapping('default', 'causal-lm', onnx_config_cls='models.codegen.CodeGenOnnxConfig'), 'convbert': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.convbert.ConvBertOnnxConfig'), 'convnext': supported_features_mapping('default', 'image-classification', onnx_config_cls='models.convnext.ConvNextOnnxConfig'), 'data2vec-text': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.data2vec.Data2VecTextOnnxConfig'), 'data2vec-vision': supported_features_mapping('default', 'image-classification', onnx_config_cls='models.data2vec.Data2VecVisionOnnxConfig'), 'deberta': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'token-classification', 'question-answering', onnx_config_cls='models.deberta.DebertaOnnxConfig'), 'deberta-v2': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.deberta_v2.DebertaV2OnnxConfig'), 'deit': supported_features_mapping('default', 'image-classification', 
onnx_config_cls='models.deit.DeiTOnnxConfig'), 'detr': supported_features_mapping('default', 'object-detection', 'image-segmentation', onnx_config_cls='models.detr.DetrOnnxConfig'), 'distilbert': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.distilbert.DistilBertOnnxConfig'), 'electra': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.electra.ElectraOnnxConfig'), 'flaubert': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.flaubert.FlaubertOnnxConfig'), 'gpt2': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'sequence-classification', 'token-classification', onnx_config_cls='models.gpt2.GPT2OnnxConfig'), 'gptj': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'question-answering', 'sequence-classification', onnx_config_cls='models.gptj.GPTJOnnxConfig'), 'gpt-neo': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'sequence-classification', onnx_config_cls='models.gpt_neo.GPTNeoOnnxConfig'), 'groupvit': supported_features_mapping('default', onnx_config_cls='models.groupvit.GroupViTOnnxConfig'), 'ibert': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.ibert.IBertOnnxConfig'), 'imagegpt': supported_features_mapping('default', 'image-classification', onnx_config_cls='models.imagegpt.ImageGPTOnnxConfig'), 'layoutlm': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'token-classification', onnx_config_cls='models.layoutlm.LayoutLMOnnxConfig'), 'layoutlmv3': supported_features_mapping('default', 'question-answering', 'sequence-classification', 'token-classification', onnx_config_cls='models.layoutlmv3.LayoutLMv3OnnxConfig'), 'levit': supported_features_mapping('default', 'image-classification', onnx_config_cls='models.levit.LevitOnnxConfig'), 'longt5': supported_features_mapping('default', 'default-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', onnx_config_cls='models.longt5.LongT5OnnxConfig'), 'longformer': supported_features_mapping('default', 'masked-lm', 'multiple-choice', 'question-answering', 'sequence-classification', 'token-classification', onnx_config_cls='models.longformer.LongformerOnnxConfig'), 'marian': supported_features_mapping('default', 'default-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', 'causal-lm', 'causal-lm-with-past', onnx_config_cls='models.marian.MarianOnnxConfig'), 'mbart': supported_features_mapping('default', 'default-with-past', 'causal-lm', 'causal-lm-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', 'sequence-classification', 'question-answering', onnx_config_cls='models.mbart.MBartOnnxConfig'), 'mobilebert': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.mobilebert.MobileBertOnnxConfig'), 'mobilenet-v1': supported_features_mapping('default', 'image-classification', onnx_config_cls='models.mobilenet_v1.MobileNetV1OnnxConfig'), 'mobilenet-v2': supported_features_mapping('default', 
'image-classification', onnx_config_cls='models.mobilenet_v2.MobileNetV2OnnxConfig'), 'mobilevit': supported_features_mapping('default', 'image-classification', onnx_config_cls='models.mobilevit.MobileViTOnnxConfig'), 'mt5': supported_features_mapping('default', 'default-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', onnx_config_cls='models.mt5.MT5OnnxConfig'), 'm2m-100': supported_features_mapping('default', 'default-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', onnx_config_cls='models.m2m_100.M2M100OnnxConfig'), 'owlvit': supported_features_mapping('default', onnx_config_cls='models.owlvit.OwlViTOnnxConfig'), 'perceiver': supported_features_mapping('image-classification', 'masked-lm', 'sequence-classification', onnx_config_cls='models.perceiver.PerceiverOnnxConfig'), 'poolformer': supported_features_mapping('default', 'image-classification', onnx_config_cls='models.poolformer.PoolFormerOnnxConfig'), 'rembert': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.rembert.RemBertOnnxConfig'), 'resnet': supported_features_mapping('default', 'image-classification', onnx_config_cls='models.resnet.ResNetOnnxConfig'), 'roberta': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.roberta.RobertaOnnxConfig'), 'roformer': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'token-classification', 'multiple-choice', 'question-answering', 'token-classification', onnx_config_cls='models.roformer.RoFormerOnnxConfig'), 'segformer': supported_features_mapping('default', 'image-classification', 'semantic-segmentation', onnx_config_cls='models.segformer.SegformerOnnxConfig'), 'squeezebert': supported_features_mapping('default', 'masked-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.squeezebert.SqueezeBertOnnxConfig'), 'swin': supported_features_mapping('default', 'image-classification', onnx_config_cls='models.swin.SwinOnnxConfig'), 't5': supported_features_mapping('default', 'default-with-past', 'seq2seq-lm', 'seq2seq-lm-with-past', onnx_config_cls='models.t5.T5OnnxConfig'), 'vision-encoder-decoder': supported_features_mapping('vision2seq-lm', onnx_config_cls='models.vision_encoder_decoder.VisionEncoderDecoderOnnxConfig'), 'vit': supported_features_mapping('default', 'image-classification', onnx_config_cls='models.vit.ViTOnnxConfig'), 'whisper': supported_features_mapping('default', 'default-with-past', 'speech2seq-lm', 'speech2seq-lm-with-past', onnx_config_cls='models.whisper.WhisperOnnxConfig'), 'xlm': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.xlm.XLMOnnxConfig'), 'xlm-roberta': supported_features_mapping('default', 'masked-lm', 'causal-lm', 'sequence-classification', 'multiple-choice', 'token-classification', 'question-answering', onnx_config_cls='models.xlm_roberta.XLMRobertaOnnxConfig'), 'yolos': supported_features_mapping('default', 'object-detection', onnx_config_cls='models.yolos.YolosOnnxConfig')}
AVAILABLE_FEATURES = sorted(reduce(lambda s1, s2: s1 | s2, (v.keys() for v in _SUPPORTED_MODEL_TYPE.values())))
@staticmethod
def get_supported_features_for_model_type(model_type: str, model_name: Optional[str]=None) -> dict[str, Callable[[PretrainedConfig], OnnxConfig]]:
"""
Tries to retrieve the feature -> OnnxConfig constructor map from the model type.
Args:
model_type (`str`):
The model type to retrieve the supported features for.
model_name (`str`, *optional*):
The name attribute of the model object, only used for the exception message.
Returns:
The dictionary mapping each feature to a corresponding OnnxConfig constructor.
"""
model_type = model_type.lower()
if model_type not in FeaturesManager._SUPPORTED_MODEL_TYPE:
model_type_and_model_name = f'{model_type} ({model_name})' if model_name else model_type
raise KeyError(f'{model_type_and_model_name} is not supported yet. Only {list(FeaturesManager._SUPPORTED_MODEL_TYPE.keys())} are supported. If you want to support {model_type} please propose a PR or open up an issue.')
return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type]
@staticmethod
def feature_to_task(feature: str) -> str:
return feature.replace('-with-past', '')
@staticmethod
def get_model_class_for_feature(feature: str) -> type:
"""
Attempts to retrieve an AutoModel class from a feature name.
Args:
feature (`str`):
The feature required.
Returns:
The AutoModel class corresponding to the feature.
"""
task = FeaturesManager.feature_to_task(feature)
task_to_automodel = FeaturesManager._TASKS_TO_AUTOMODELS
if task not in task_to_automodel:
raise KeyError(f'Unknown task: {feature}. Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}')
return task_to_automodel[task]
@staticmethod
def get_model_from_feature(feature: str, model: str, cache_dir: Optional[str]=None) -> 'PreTrainedModel':
"""
Attempts to retrieve a model from a model's name and the feature to be enabled.
Args:
feature (`str`):
The feature required.
model (`str`):
The name of the model to export.
Returns:
The instance of the model.
"""
model_class = FeaturesManager.get_model_class_for_feature(feature)
model = model_class.from_pretrained(model, cache_dir=cache_dir)
return model
@staticmethod
def check_supported_model_or_raise(model: 'PreTrainedModel', feature: str='default') -> tuple[str, Callable]:
"""
Check whether or not the model has the requested features.
Args:
model: The model to export.
feature: The name of the feature to check if it is available.
Returns:
(str) The type of the model (OnnxConfig) The OnnxConfig instance holding the model export properties.
"""
model_type = model.config.model_type.replace('_', '-')
model_name = getattr(model, 'name', '')
model_features = FeaturesManager.get_supported_features_for_model_type(model_type, model_name=model_name)
if feature not in model_features:
raise ValueError(f"{model.config.model_type} doesn't support feature {feature}. Supported values are: {model_features}")
return (model.config.model_type, FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature])
    @staticmethod
    def get_config(model_type: str, feature: str) -> OnnxConfig:
"""
Gets the OnnxConfig for a model_type and feature combination.
Args:
model_type (`str`):
The model type to retrieve the config for.
feature (`str`):
The feature to retrieve the config for.
Returns:
`OnnxConfig`: config for the combination
"""
return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature]
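# Lookup-only usage sketch (no checkpoint download required), assuming the
# class above is in scope: map a model type and feature to its OnnxConfig
# constructor, then build the config from an already-loaded PretrainedConfig.
features = FeaturesManager.get_supported_features_for_model_type('bert')
print(sorted(features))   # e.g. ['causal-lm', 'default', 'masked-lm', ...]
config_constructor = FeaturesManager.get_config('bert', 'sequence-classification')
# onnx_config = config_constructor(model.config)  # given a loaded BERT config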
|
class FeaturesManager:
@staticmethod
def get_supported_features_for_model_type(model_type: str, model_name: Optional[str]=None) -> dict[str, Callable[[PretrainedConfig], OnnxConfig]]:
'''
Tries to retrieve the feature -> OnnxConfig constructor map from the model type.
Args:
model_type (`str`):
The model type to retrieve the supported features for.
model_name (`str`, *optional*):
The name attribute of the model object, only used for the exception message.
Returns:
The dictionary mapping each feature to a corresponding OnnxConfig constructor.
'''
pass
@staticmethod
def feature_to_task(feature: str) -> str:
pass
@staticmethod
def get_model_class_for_feature(feature: str) -> type:
'''
Attempts to retrieve an AutoModel class from a feature name.
Args:
feature (`str`):
The feature required.
Returns:
The AutoModel class corresponding to the feature.
'''
pass
@staticmethod
def get_model_from_feature(feature: str, model: str, cache_dir: Optional[str]=None) -> 'PreTrainedModel':
'''
Attempts to retrieve a model from a model's name and the feature to be enabled.
Args:
feature (`str`):
The feature required.
model (`str`):
The name of the model to export.
Returns:
The instance of the model.
'''
pass
@staticmethod
def check_supported_model_or_raise(model: 'PreTrainedModel', feature: str='default') -> tuple[str, Callable]:
'''
Check whether or not the model has the requested features.
Args:
model: The model to export.
feature: The name of the feature to check if it is available.
Returns:
(str) The type of the model (OnnxConfig) The OnnxConfig instance holding the model export properties.
'''
pass
    @staticmethod
    def get_config(model_type: str, feature: str) -> OnnxConfig:
'''
Gets the OnnxConfig for a model_type and feature combination.
Args:
model_type (`str`):
The model type to retrieve the config for.
feature (`str`):
The feature to retrieve the config for.
Returns:
`OnnxConfig`: config for the combination
'''
pass
| 12 | 5 | 23 | 3 | 11 | 9 | 3 | 0.13 | 0 | 8 | 1 | 0 | 1 | 0 | 8 | 8 | 665 | 32 | 560 | 35 | 538 | 73 | 69 | 22 | 60 | 7 | 0 | 2 | 24
|
6,399
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/onnx/utils.py
|
transformers.onnx.utils.ParameterFormat
|
from enum import Enum
from ctypes import c_float, sizeof
class ParameterFormat(Enum):
Float = c_float
@property
def size(self) -> int:
"""
        Number of bytes required for this data type
Returns:
Integer > 0
"""
return sizeof(self.value)
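# Sketch of the 2 GB external-data check that ParameterFormat feeds into:
# serialized size is num_parameters * ParameterFormat.Float.size (4 bytes).
from ctypes import c_float, sizeof
num_parameters = 600_000_000                      # hypothetical model size
size_bytes = num_parameters * sizeof(c_float)     # == ParameterFormat.Float.size
print(size_bytes >= 2 * 1024 ** 3)                # True -> external data format needed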
|
class ParameterFormat(Enum):
@property
def size(self) -> int:
'''
        Number of bytes required for this data type
Returns:
Integer > 0
'''
pass
| 3 | 1 | 8 | 1 | 2 | 5 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 50 | 12 | 2 | 5 | 4 | 2 | 5 | 4 | 3 | 2 | 1 | 4 | 0 | 1
|