| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length, nullable) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |

Each record below lists its cells in this column order: id, repository_name, file_path, class_name, human_written_code, class_skeleton, followed by the numeric metric cells (labeled inline).
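To make the schema concrete, here is a minimal sketch of how rows with these columns could be loaded and filtered. The Parquet file name and the use of pandas are assumptions for illustration, not part of the dataset itself.

```python
# Hypothetical loading/filtering sketch; "code_metrics.parquet" is an assumed file name.
import pandas as pd

df = pd.read_parquet("code_metrics.parquet")  # columns as in the schema above

# Example query: DETR classes with at least some comments and moderate complexity.
detr = df[
    df["file_path"].str.contains("models/detr/modeling_detr.py", regex=False)
    & (df["CommentToCodeRatio"] >= 0.1)
    & (df["MaxCyclomatic"] <= 10)
]
print(detr[["class_name", "CountLineCode", "SumCyclomatic"]].head())
```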
2,100
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/detr/modeling_detr.py
|
transformers.models.detr.modeling_detr.DetrMaskHeadSmallConv
|
from torch import Tensor, nn
import torch
class DetrMaskHeadSmallConv(nn.Module):
"""
Simple convolutional head, using group norm. Upsampling is done using a FPN approach
"""
def __init__(self, dim, fpn_dims, context_dim):
super().__init__()
if dim % 8 != 0:
raise ValueError('The hidden_size + number of attention heads must be divisible by 8 as the number of groups in GroupNorm is set to 8')
inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64]
self.lay1 = nn.Conv2d(dim, dim, 3, padding=1)
self.gn1 = nn.GroupNorm(8, dim)
self.lay2 = nn.Conv2d(dim, inter_dims[1], 3, padding=1)
self.gn2 = nn.GroupNorm(min(8, inter_dims[1]), inter_dims[1])
self.lay3 = nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
self.gn3 = nn.GroupNorm(min(8, inter_dims[2]), inter_dims[2])
self.lay4 = nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
self.gn4 = nn.GroupNorm(min(8, inter_dims[3]), inter_dims[3])
self.lay5 = nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
self.gn5 = nn.GroupNorm(min(8, inter_dims[4]), inter_dims[4])
self.out_lay = nn.Conv2d(inter_dims[4], 1, 3, padding=1)
self.dim = dim
self.adapter1 = nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
self.adapter2 = nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
self.adapter3 = nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=1)
nn.init.constant_(m.bias, 0)
def forward(self, x: Tensor, bbox_mask: Tensor, fpns: list[Tensor]):
x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)
x = self.lay1(x)
x = self.gn1(x)
x = nn.functional.relu(x)
x = self.lay2(x)
x = self.gn2(x)
x = nn.functional.relu(x)
cur_fpn = self.adapter1(fpns[0])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
x = self.lay3(x)
x = self.gn3(x)
x = nn.functional.relu(x)
cur_fpn = self.adapter2(fpns[1])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
x = self.lay4(x)
x = self.gn4(x)
x = nn.functional.relu(x)
cur_fpn = self.adapter3(fpns[2])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
x = self.lay5(x)
x = self.gn5(x)
x = nn.functional.relu(x)
x = self.out_lay(x)
return x
|
class DetrMaskHeadSmallConv(nn.Module):
'''
Simple convolutional head, using group norm. Upsampling is done using a FPN approach
'''
def __init__(self, dim, fpn_dims, context_dim):
pass
def forward(self, x: Tensor, bbox_mask: Tensor, fpns: list[Tensor]):
pass
| total_program_units=3 | total_doc_str=1
| AvgCountLine=36 | AvgCountLineBlank=6 | AvgCountLineCode=29 | AvgCountLineComment=2 | AvgCyclomatic=4 | CommentToCodeRatio=0.1
| CountClassBase=1 | CountClassCoupled=3 | CountClassCoupledModified=0 | CountClassDerived=0
| CountDeclInstanceMethod=2 | CountDeclInstanceVariable=15 | CountDeclMethod=2 | CountDeclMethodAll=12
| CountLine=78 | CountLineBlank=13 | CountLineCode=59 | CountLineCodeDecl=21 | CountLineCodeExe=56 | CountLineComment=6
| CountStmt=56 | CountStmtDecl=21 | CountStmtExe=53
| MaxCyclomatic=4 | MaxInheritanceTree=1 | MaxNesting=2 | SumCyclomatic=8
|
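As a quick orientation to the numeric columns, the ratio field looks consistent with being derived from the raw line counts. A minimal check against this record, under the assumption that CommentToCodeRatio = CountLineComment / CountLineCode:

```python
# Sanity check for record 2,100 (DetrMaskHeadSmallConv); the formula is an assumption.
count_line_comment = 6
count_line_code = 59
print(round(count_line_comment / count_line_code, 2))  # 0.1, matching CommentToCodeRatio above
```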
2,101
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/detr/modeling_detr.py
|
transformers.models.detr.modeling_detr.DetrModel
|
from .configuration_detr import DetrConfig
from torch import Tensor, nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
@auto_docstring(custom_intro='\n The bare DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without\n any specific head on top.\n ')
class DetrModel(DetrPreTrainedModel):
def __init__(self, config: DetrConfig):
super().__init__(config)
backbone = DetrConvEncoder(config)
object_queries = build_position_encoding(config)
self.backbone = DetrConvModel(backbone, object_queries)
self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1)
self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model)
self.encoder = DetrEncoder(config)
self.decoder = DetrDecoder(config)
self.post_init()
def get_encoder(self):
return self.encoder
def freeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(False)
def unfreeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(True)
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], DetrModelOutput]:
"""
decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
Not used by default. Can be used to mask object queries.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
can choose to directly pass a flattened representation of an image.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
embedded representation.
Examples:
```python
>>> from transformers import AutoImageProcessor, DetrModel
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50")
>>> model = DetrModel.from_pretrained("facebook/detr-resnet-50")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**inputs)
>>> # the last hidden states are the final query embeddings of the Transformer decoder
>>> # these are of shape (batch_size, num_queries, hidden_size)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 100, 256]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, num_channels, height, width = pixel_values.shape
device = pixel_values.device
if pixel_mask is None:
pixel_mask = torch.ones((batch_size, height, width), device=device)
features, object_queries_list = self.backbone(pixel_values, pixel_mask)
feature_map, mask = features[-1]
if mask is None:
raise ValueError('Backbone does not return downsampled pixel mask')
projected_feature_map = self.input_projection(feature_map)
flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)
object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1)
flattened_mask = mask.flatten(1)
if encoder_outputs is None:
encoder_outputs = self.encoder(inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1)
queries = torch.zeros_like(query_position_embeddings)
decoder_outputs = self.decoder(inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
if not return_dict:
return decoder_outputs + encoder_outputs
return DetrModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states)
| class_skeleton=null
| total_program_units=8 | total_doc_str=1
| AvgCountLine=25 | AvgCountLineBlank=4 | AvgCountLineCode=15 | AvgCountLineComment=6 | AvgCyclomatic=3 | CommentToCodeRatio=0.4
| CountClassBase=1 | CountClassCoupled=10 | CountClassCoupledModified=7 | CountClassDerived=0
| CountDeclInstanceMethod=6 | CountDeclInstanceVariable=5 | CountDeclMethod=6 | CountDeclMethodAll=7
| CountLine=159 | CountLineBlank=30 | CountLineCode=92 | CountLineCodeDecl=39 | CountLineCodeExe=72 | CountLineComment=37
| CountStmt=46 | CountStmtDecl=27 | CountStmtExe=39
| MaxCyclomatic=11 | MaxInheritanceTree=2 | MaxNesting=1 | SumCyclomatic=18
|
2,102
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/detr/modeling_detr.py
|
transformers.models.detr.modeling_detr.DetrObjectDetectionOutput
|
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
from dataclasses import dataclass
import torch
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Output type of [`DetrForObjectDetection`].\n ')
class DetrObjectDetectionOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~DetrImageProcessor.post_process_object_detection`] to retrieve the
unnormalized bounding boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
auxiliary_outputs: Optional[list[dict]] = None
last_hidden_state: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`DetrForObjectDetection`].\n ')
class DetrObjectDetectionOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~DetrImageProcessor.post_process_object_detection`] to retrieve the
unnormalized bounding boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
'''
pass
| total_program_units=3 | total_doc_str=1
| AvgCountLine=0 | AvgCountLineBlank=0 | AvgCountLineCode=0 | AvgCountLineComment=0 | AvgCyclomatic=0 | CommentToCodeRatio=3.46
| CountClassBase=1 | CountClassCoupled=0 | CountClassCoupledModified=0 | CountClassDerived=0
| CountDeclInstanceMethod=0 | CountDeclInstanceVariable=0 | CountDeclMethod=0 | CountDeclMethodAll=0
| CountLine=60 | CountLineBlank=2 | CountLineCode=13 | CountLineCodeDecl=13 | CountLineCodeExe=12 | CountLineComment=45
| CountStmt=13 | CountStmtDecl=13 | CountStmtExe=12
| MaxCyclomatic=0 | MaxInheritanceTree=1 | MaxNesting=0 | SumCyclomatic=0
|
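The `pred_boxes` field documented in this record holds normalized (center_x, center_y, width, height) coordinates. A small, illustrative conversion to pixel corner coordinates follows; this helper is not part of transformers, which provides `post_process_object_detection` for the full pipeline.

```python
import torch

def center_to_corners(boxes: torch.Tensor, image_height: int, image_width: int) -> torch.Tensor:
    """Convert normalized (cx, cy, w, h) boxes to pixel (x0, y0, x1, y1) boxes."""
    cx, cy, w, h = boxes.unbind(-1)
    corners = torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)
    scale = torch.tensor([image_width, image_height, image_width, image_height], dtype=boxes.dtype)
    return corners * scale

boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])  # one normalized box
print(center_to_corners(boxes, image_height=480, image_width=640))
# tensor([[256., 144., 384., 336.]])
```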
2,103
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/detr/modeling_detr.py
|
transformers.models.detr.modeling_detr.DetrPreTrainedModel
|
from torch import Tensor, nn
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
from .configuration_detr import DetrConfig
@auto_docstring
class DetrPreTrainedModel(PreTrainedModel):
config: DetrConfig
base_model_prefix = 'model'
main_input_name = 'pixel_values'
_no_split_modules = ['DetrConvEncoder', 'DetrEncoderLayer', 'DetrDecoderLayer']
def _init_weights(self, module):
std = self.config.init_std
xavier_std = self.config.init_xavier_std
if isinstance(module, DetrMHAttentionMap):
nn.init.zeros_(module.k_linear.bias)
nn.init.zeros_(module.q_linear.bias)
nn.init.xavier_uniform_(module.k_linear.weight, gain=xavier_std)
nn.init.xavier_uniform_(module.q_linear.weight, gain=xavier_std)
elif isinstance(module, DetrLearnedPositionEmbedding):
nn.init.uniform_(module.row_embeddings.weight)
nn.init.uniform_(module.column_embeddings.weight)
if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
|
@auto_docstring
class DetrPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| total_program_units=3 | total_doc_str=0
| AvgCountLine=22 | AvgCountLineBlank=1 | AvgCountLineCode=19 | AvgCountLineComment=2 | AvgCyclomatic=7 | CommentToCodeRatio=0.08
| CountClassBase=1 | CountClassCoupled=2 | CountClassCoupledModified=2 | CountClassDerived=5
| CountDeclInstanceMethod=1 | CountDeclInstanceVariable=0 | CountDeclMethod=1 | CountDeclMethodAll=1
| CountLine=28 | CountLineBlank=2 | CountLineCode=24 | CountLineCodeDecl=8 | CountLineCodeExe=22 | CountLineComment=2
| CountStmt=22 | CountStmtDecl=8 | CountStmtExe=20
| MaxCyclomatic=7 | MaxInheritanceTree=1 | MaxNesting=2 | SumCyclomatic=7
|
2,104
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/detr/modeling_detr.py
|
transformers.models.detr.modeling_detr.DetrSegmentationOutput
|
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro='\n Output type of [`DetrForSegmentation`].\n ')
class DetrSegmentationOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~DetrImageProcessor.post_process_object_detection`] to retrieve the
unnormalized bounding boxes.
pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height/4, width/4)`):
Segmentation masks logits for all queries. See also
[`~DetrImageProcessor.post_process_semantic_segmentation`] or
[`~DetrImageProcessor.post_process_instance_segmentation`]
[`~DetrImageProcessor.post_process_panoptic_segmentation`] to evaluate semantic, instance and panoptic
segmentation masks respectively.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
pred_masks: Optional[torch.FloatTensor] = None
auxiliary_outputs: Optional[list[dict]] = None
last_hidden_state: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`DetrForSegmentation`].\n ')
class DetrSegmentationOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~DetrImageProcessor.post_process_object_detection`] to retrieve the
unnormalized bounding boxes.
pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height/4, width/4)`):
Segmentation masks logits for all queries. See also
[`~DetrImageProcessor.post_process_semantic_segmentation`] or
[`~DetrImageProcessor.post_process_instance_segmentation`]
[`~DetrImageProcessor.post_process_panoptic_segmentation`] to evaluate semantic, instance and panoptic
segmentation masks respectively.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
'''
pass
| total_program_units=3 | total_doc_str=1
| AvgCountLine=0 | AvgCountLineBlank=0 | AvgCountLineCode=0 | AvgCountLineComment=0 | AvgCyclomatic=0 | CommentToCodeRatio=3.64
| CountClassBase=1 | CountClassCoupled=0 | CountClassCoupledModified=0 | CountClassDerived=0
| CountDeclInstanceMethod=0 | CountDeclInstanceVariable=0 | CountDeclMethod=0 | CountDeclMethodAll=0
| CountLine=67 | CountLineBlank=2 | CountLineCode=14 | CountLineCodeDecl=14 | CountLineCodeExe=13 | CountLineComment=51
| CountStmt=14 | CountStmtDecl=14 | CountStmtExe=13
| MaxCyclomatic=0 | MaxInheritanceTree=1 | MaxNesting=0 | SumCyclomatic=0
|
2,105
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/detr/modeling_detr.py
|
transformers.models.detr.modeling_detr.DetrSinePositionEmbedding
|
import math
import torch
from torch import Tensor, nn
class DetrSinePositionEmbedding(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
"""
def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.embedding_dim = embedding_dim
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError('normalize should be True if scale is passed')
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, pixel_values, pixel_mask):
if pixel_mask is None:
raise ValueError('No pixel mask provided')
y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
y_embed = y_embed / (y_embed[:, -1:, :] + 1e-06) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + 1e-06) * self.scale
dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float()
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.embedding_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
|
class DetrSinePositionEmbedding(nn.Module):
'''
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
'''
def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
pass
def forward(self, pixel_values, pixel_mask):
pass
| total_program_units=3 | total_doc_str=1
| AvgCountLine=14 | AvgCountLineBlank=1 | AvgCountLineCode=13 | AvgCountLineComment=0 | AvgCyclomatic=3 | CommentToCodeRatio=0.15
| CountClassBase=1 | CountClassCoupled=2 | CountClassCoupledModified=0 | CountClassDerived=0
| CountDeclInstanceMethod=2 | CountDeclInstanceVariable=4 | CountDeclMethod=2 | CountDeclMethodAll=12
| CountLine=35 | CountLineBlank=4 | CountLineCode=27 | CountLineCodeDecl=13 | CountLineCodeExe=24 | CountLineComment=4
| CountStmt=27 | CountStmtDecl=13 | CountStmtExe=24
| MaxCyclomatic=3 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=6
|
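A short usage sketch for the DetrSinePositionEmbedding class shown in this record; it assumes the class definition above is in scope, and the dummy input shapes are arbitrary.

```python
# Assumes the DetrSinePositionEmbedding class from the record above is defined in scope.
import torch

pos_embed = DetrSinePositionEmbedding(embedding_dim=64, normalize=True)
pixel_values = torch.randn(2, 3, 32, 32)               # (batch, channels, height, width)
pixel_mask = torch.ones(2, 32, 32, dtype=torch.long)   # 1 = real pixel, 0 = padding
pos = pos_embed(pixel_values, pixel_mask)
print(pos.shape)  # torch.Size([2, 128, 32, 32]); channels = 2 * embedding_dim
```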
2,106
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/configuration_diffllama.py
|
transformers.models.diffllama.configuration_diffllama.DiffLlamaConfig
|
from ...modeling_rope_utils import rope_config_validation
from ...configuration_utils import PretrainedConfig
class DiffLlamaConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`DiffLlamaModel`]. It is used to instantiate an DiffLlama
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults
will yield a similar configuration to that of the [kajuma/DiffLlama-0.3B-handcut](https://huggingface.co/kajuma/DiffLlama-0.3B-handcut).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the DiffLlama model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`DiffLlamaModel`]
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 8192):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 16):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'diffllama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'diffllama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'diffllama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'diffllama3'. Scaling factor applied to high frequency components of the RoPE
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
lambda_std_dev (`float`, *optional*, defaults to 0.1):
The standard deviation for initialization of parameter lambda in attention layer.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_heads
```python
>>> from transformers import DiffLlamaModel, DiffLlamaConfig
>>> # Initializing a DiffLlama diffllama-7b style configuration
>>> configuration = DiffLlamaConfig()
>>> # Initializing a model from the diffllama-7b style configuration
>>> model = DiffLlamaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'diffllama'
keys_to_ignore_at_inference = ['past_key_values']
def __init__(self, vocab_size=32000, hidden_size=2048, intermediate_size=8192, num_hidden_layers=16, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, lambda_std_dev=0.1, head_dim=None, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.lambda_std_dev = lambda_std_dev
self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
if self.rope_scaling is not None and 'type' in self.rope_scaling:
self.rope_scaling['rope_type'] = self.rope_scaling['type']
rope_config_validation(self)
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
|
class DiffLlamaConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`DiffLlamaModel`]. It is used to instantiate an DiffLlama
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults
will yield a similar configuration to that of the [kajuma/DiffLlama-0.3B-handcut](https://huggingface.co/kajuma/DiffLlama-0.3B-handcut).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the DiffLlama model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`DiffLlamaModel`]
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 8192):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 16):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'diffllama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'diffllama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'diffllama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'diffllama3'. Scaling factor applied to high frequency components of the RoPE
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
lambda_std_dev (`float`, *optional*, defaults to 0.1):
The standard deviation for initialization of parameter lambda in attention layer.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_heads
```python
>>> from transformers import DiffLlamaModel, DiffLlamaConfig
>>> # Initializing a DiffLlama diffllama-7b style configuration
>>> configuration = DiffLlamaConfig()
>>> # Initializing a model from the diffllama-7b style configuration
>>> model = DiffLlamaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=32000, hidden_size=2048, intermediate_size=8192, num_hidden_layers=16, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, lambda_std_dev=0.1, head_dim=None, **kwargs):
pass
| total_program_units=2 | total_doc_str=1
| AvgCountLine=60 | AvgCountLineBlank=3 | AvgCountLineCode=54 | AvgCountLineComment=3 | AvgCyclomatic=4 | CommentToCodeRatio=1.82
| CountClassBase=1 | CountClassCoupled=1 | CountClassCoupledModified=0 | CountClassDerived=0
| CountDeclInstanceMethod=1 | CountDeclInstanceVariable=17 | CountDeclMethod=1 | CountDeclMethodAll=1
| CountLine=173 | CountLineBlank=12 | CountLineCode=57 | CountLineCodeDecl=45 | CountLineCodeExe=31 | CountLineComment=104
| CountStmt=27 | CountStmtDecl=21 | CountStmtExe=25
| MaxCyclomatic=4 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=4
|
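The `__init__` in this record falls back to defaults for `num_key_value_heads` and `head_dim`. A standalone restatement of just that defaulting logic, using the documented default values (plain Python, not an import from transformers):

```python
# Restates the defaulting logic from DiffLlamaConfig.__init__ above with the documented defaults.
hidden_size = 2048
num_attention_heads = 32
num_key_value_heads = None   # None -> multi-head attention (MHA); smaller values give GQA/MQA
head_dim = None

if num_key_value_heads is None:
    num_key_value_heads = num_attention_heads
if head_dim is None:
    head_dim = hidden_size // num_attention_heads

print(num_key_value_heads, head_dim)  # 32 64
```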
2,107
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaAttention
|
import math
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...utils.deprecation import deprecate_kwarg
from .configuration_diffllama import DiffLlamaConfig
from torch import nn
from typing import Optional, Union
import torch
class DiffLlamaAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: DiffLlamaConfig, layer_idx: Optional[int]=None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
self.attention_dropout = config.attention_dropout
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = getattr(config, 'head_dim', self.hidden_size // self.num_heads)
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.rope_theta = config.rope_theta
self.is_causal = True
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
self.lambda_init = lambda_init_fn(layer_idx)
self.lambda_q1 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
self.lambda_k1 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
self.lambda_q2 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
self.lambda_k2 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
self.groupnorm = nn.RMSNorm(2 * self.head_dim, eps=config.rms_norm_eps, elementwise_affine=False)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, target_len, _ = hidden_states.size()
q_len = target_len
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)
value_states = value_states.repeat(1, 2, 1, 1)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, :key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(query_states.dtype)
lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(query_states.dtype)
lambda_full = lambda_1 - lambda_2 + self.lambda_init
attn_output = torch.matmul(attn_weights, value_states)
attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
attn_output = attn_output1 - lambda_full * attn_output2
attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
|
class DiffLlamaAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: DiffLlamaConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| total_program_units=4 | total_doc_str=1
| AvgCountLine=50 | AvgCountLineBlank=8 | AvgCountLineCode=41 | AvgCountLineComment=2 | AvgCyclomatic=3 | CommentToCodeRatio=0.06
| CountClassBase=1 | CountClassCoupled=6 | CountClassCoupledModified=2 | CountClassDerived=2
| CountDeclInstanceMethod=2 | CountDeclInstanceVariable=21 | CountDeclMethod=2 | CountDeclMethodAll=12
| CountLine=104 | CountLineBlank=18 | CountLineCode=82 | CountLineCodeDecl=49 | CountLineCodeExe=68 | CountLineComment=5
| CountStmt=63 | CountStmtDecl=38 | CountStmtExe=60
| MaxCyclomatic=4 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=6
|
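The attention class in this record combines two attention maps with a learned scalar. A minimal numeric sketch of the lambda reparameterization it uses (illustrative values only; `lambda_init` is produced by a `lambda_init_fn(layer_idx)` helper that is not part of the record):

```python
# Illustrative sketch of the lambda reparameterization in DiffLlamaAttention.forward above.
import torch

head_dim = 64
lambda_q1 = torch.normal(0, 0.1, size=(head_dim,))
lambda_k1 = torch.normal(0, 0.1, size=(head_dim,))
lambda_q2 = torch.normal(0, 0.1, size=(head_dim,))
lambda_k2 = torch.normal(0, 0.1, size=(head_dim,))
lambda_init = 0.8  # placeholder; the real value comes from lambda_init_fn(layer_idx)

lambda_1 = torch.exp(torch.sum(lambda_q1 * lambda_k1))
lambda_2 = torch.exp(torch.sum(lambda_q2 * lambda_k2))
lambda_full = lambda_1 - lambda_2 + lambda_init
# The two halves of the attention output are then combined as:
#   attn_output = attn_output1 - lambda_full * attn_output2
print(float(lambda_full))
```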
2,108
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaDecoderLayer
|
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...utils.deprecation import deprecate_kwarg
from typing import Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from .configuration_diffllama import DiffLlamaConfig
from ...processing_utils import Unpack
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
import torch
class DiffLlamaDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: DiffLlamaConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = DIFFLLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
self.mlp = DiffLlamaMLP(config)
self.input_layernorm = DiffLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = DiffLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
|
class DiffLlamaDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: DiffLlamaConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
| total_program_units=4 | total_doc_str=0
| AvgCountLine=25 | AvgCountLineBlank=4 | AvgCountLineCode=21 | AvgCountLineComment=2 | AvgCyclomatic=2 | CommentToCodeRatio=0.07
| CountClassBase=1 | CountClassCoupled=9 | CountClassCoupledModified=5 | CountClassDerived=0
| CountDeclInstanceMethod=2 | CountDeclInstanceVariable=5 | CountDeclMethod=2 | CountDeclMethodAll=12
| CountLine=52 | CountLineBlank=8 | CountLineCode=42 | CountLineCodeDecl=22 | CountLineCodeExe=28 | CountLineComment=3
| CountStmt=21 | CountStmtDecl=11 | CountStmtExe=18
| MaxCyclomatic=2 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=3
|
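The decoder layer in this record follows the usual pre-norm residual pattern (normalize, mix, add residual; then normalize, MLP, add residual). A small schematic of that control flow with stand-in modules, purely illustrative and not the transformers implementation:

```python
import torch
from torch import nn

class PreNormBlock(nn.Module):
    """Illustrative pre-norm residual block mirroring DiffLlamaDecoderLayer's control flow."""
    def __init__(self, hidden_size: int):
        super().__init__()
        self.norm1 = nn.LayerNorm(hidden_size)             # stands in for input_layernorm (RMSNorm above)
        self.norm2 = nn.LayerNorm(hidden_size)             # stands in for post_attention_layernorm
        self.mixer = nn.Linear(hidden_size, hidden_size)   # stands in for self-attention
        self.mlp = nn.Linear(hidden_size, hidden_size)     # stands in for the MLP

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = hidden_states + self.mixer(self.norm1(hidden_states))  # attention + residual
        hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))    # MLP + residual
        return hidden_states

x = torch.randn(2, 5, 16)
print(PreNormBlock(16)(x).shape)  # torch.Size([2, 5, 16])
```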
2,109
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaFlashAttention2
|
from ...modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask
from ...cache_utils import Cache, DynamicCache, StaticCache
from typing import Optional, Union
from ...utils.deprecation import deprecate_kwarg
import torch
class DiffLlamaFlashAttention2(DiffLlamaAttention):
"""
DiffLlama flash attention module. This module inherits from `DiffLlamaAttention` as the weights of the module stays
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, None]:
if isinstance(past_key_values, StaticCache):
raise ValueError('`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers')
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
if position_embeddings is None:
logger.warning_once('The attention layers in this model are transitioning from computing the RoPE embeddings internally through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed `position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be removed and `position_embeddings` will be mandatory.')
cos, sin = self.rotary_emb(value_states, position_ids)
else:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
dropout_rate = self.attention_dropout if self.training else 0.0
input_dtype = query_states.dtype
device_type = query_states.device.type if query_states.device.type != 'mps' else 'cpu'
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_dtype(device_type) if hasattr(torch, 'get_autocast_dtype') else torch.get_autocast_gpu_dtype()
elif hasattr(self.config, '_pre_quantization_dtype'):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_proj.weight.dtype
logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.')
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
value_states1, value_states2 = torch.chunk(value_states, 2, dim=2)
value_states1 = value_states1.repeat(1, 1, 2, 1)
value_states2 = value_states2.repeat(1, 1, 2, 1)
attn_output1 = _flash_attention_forward(query_states, key_states, value_states1, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, 'sliding_window', None), use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal)
attn_output2 = _flash_attention_forward(query_states, key_states, value_states2, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, 'sliding_window', None), use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal)
attn_output = torch.cat([attn_output1, attn_output2], dim=-1)
attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=2)
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(query_states.dtype)
lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(query_states.dtype)
lambda_full = lambda_1 - lambda_2 + self.lambda_init
attn_output = attn_output1 - lambda_full * attn_output2
attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, None)
|
class DiffLlamaFlashAttention2(DiffLlamaAttention):
'''
DiffLlama flash attention module. This module inherits from `DiffLlamaAttention` as the weights of the module stays
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
'''
def __init__(self, *args, **kwargs):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, None]:
pass
| total_program_units=4 | total_doc_str=1
| AvgCountLine=70 | AvgCountLineBlank=11 | AvgCountLineCode=52 | AvgCountLineComment=8 | AvgCyclomatic=5 | CommentToCodeRatio=0.19
| CountClassBase=1 | CountClassCoupled=6 | CountClassCoupledModified=2 | CountClassDerived=0
| CountDeclInstanceMethod=2 | CountDeclInstanceVariable=3 | CountDeclMethod=2 | CountDeclMethodAll=14
| CountLine=148 | CountLineBlank=23 | CountLineCode=105 | CountLineCodeDecl=31 | CountLineCodeExe=92 | CountLineComment=20
| CountStmt=54 | CountStmtDecl=21 | CountStmtExe=51
| MaxCyclomatic=9 | MaxInheritanceTree=2 | MaxNesting=2 | SumCyclomatic=10
|
2,110
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaForCausalLM
|
from ...processing_utils import Unpack
import torch
from typing import Optional, Union
from ...generation import GenerationMixin
from ...cache_utils import Cache, DynamicCache, StaticCache
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
@auto_docstring
class DiffLlamaForCausalLM(DiffLlamaPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
_tp_plan = {'lm_head': 'colwise_rep'}
_pp_plan = {'lm_head': (['hidden_states'], ['logits'])}
def __init__(self, config):
super().__init__(config)
self.model = DiffLlamaModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
"""
Example:
```python
>>> from transformers import AutoTokenizer, DiffLlamaForCausalLM
>>> model = DiffLlamaForCausalLM.from_pretrained("google/diffllama-7b")
>>> tokenizer = AutoTokenizer.from_pretrained("google/diffllama-7b")
>>> prompt = "What is your favorite condiment?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"What is your favorite condiment?"
```"""
outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class DiffLlamaForCausalLM(DiffLlamaPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
'''
Example:
```python
>>> from transformers import AutoTokenizer, DiffLlamaForCausalLM
>>> model = DiffLlamaForCausalLM.from_pretrained("google/diffllama-7b")
>>> tokenizer = AutoTokenizer.from_pretrained("google/diffllama-7b")
>>> prompt = "What is your favorite condiment?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"What is your favorite condiment?"
```'''
pass
| 6
| 1
| 14
| 2
| 9
| 4
| 2
| 0.38
| 2
| 9
| 4
| 0
| 8
| 3
| 8
| 9
| 123
| 21
| 74
| 36
| 47
| 28
| 36
| 20
| 27
| 8
| 2
| 1
| 15
|
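The causal-LM record above trims hidden states with `logits_to_keep` before applying the LM head, so generation does not have to project every position into the vocabulary. Below is a minimal, standalone sketch of just that slicing logic; the tensor sizes are invented for illustration and nothing here is taken from the library itself.
```python
# Standalone illustration (toy shapes) of the `logits_to_keep` slicing used in the
# DiffLlamaForCausalLM.forward record above; not the library's code.
import torch

hidden_states = torch.randn(2, 10, 16)  # (batch, seq_len, hidden_size)

def select_positions(hidden_states, logits_to_keep):
    # An int keeps the last `logits_to_keep` positions (0 keeps the full sequence,
    # since slice(-0, None) == slice(0, None)); a tensor is used directly as an index.
    slice_indices = (
        slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
    )
    return hidden_states[:, slice_indices, :]

print(select_positions(hidden_states, 0).shape)                     # torch.Size([2, 10, 16])
print(select_positions(hidden_states, 1).shape)                     # torch.Size([2, 1, 16])
print(select_positions(hidden_states, torch.tensor([0, 9])).shape)  # torch.Size([2, 2, 16])
```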
2,111
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaForQuestionAnswering
|
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class DiffLlamaForQuestionAnswering(GenericForQuestionAnswering, DiffLlamaPreTrainedModel):
base_model_prefix = 'transformer'
|
class DiffLlamaForQuestionAnswering(GenericForQuestionAnswering, DiffLlamaPreTrainedModel):
pass
| 1
| 0
| 18
| 2
| 13
| 3
| 2
| 0.2
| 1
| 5
| 3
| 0
| 4
| 2
| 4
| 5
| 77
| 11
| 55
| 28
| 36
| 11
| 26
| 14
| 21
| 5
| 2
| 1
| 8
|
2,112
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaForSequenceClassification
|
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class DiffLlamaForSequenceClassification(GenericForSequenceClassification, DiffLlamaPreTrainedModel):
pass
|
class DiffLlamaForSequenceClassification(GenericForSequenceClassification, DiffLlamaPreTrainedModel):
pass
| 1
| 0
| 21
| 2
| 17
| 2
| 3
| 0.11
| 1
| 7
| 3
| 0
| 4
| 3
| 4
| 5
| 90
| 11
| 71
| 31
| 53
| 8
| 36
| 18
| 31
| 9
| 2
| 1
| 12
|
2,113
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaForTokenClassification
|
from ...modeling_layers import GenericForQuestionAnswering, GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class DiffLlamaForTokenClassification(GenericForTokenClassification, DiffLlamaPreTrainedModel):
pass
|
class DiffLlamaForTokenClassification(GenericForTokenClassification, DiffLlamaPreTrainedModel):
pass
| 1
| 0
| 17
| 1
| 14
| 2
| 3
| 0.11
| 1
| 5
| 2
| 0
| 4
| 4
| 4
| 5
| 79
| 8
| 64
| 28
| 41
| 7
| 29
| 15
| 24
| 5
| 2
| 1
| 10
|
2,114
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaMLP
|
from torch import nn
from ...activations import ACT2FN
class DiffLlamaMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
|
class DiffLlamaMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 7
| 2
| 12
| 14
| 1
| 13
| 11
| 10
| 0
| 13
| 11
| 10
| 1
| 1
| 0
| 2
|
2,115
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaModel
|
import torch
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...masking_utils import create_causal_mask
from .configuration_diffllama import DiffLlamaConfig
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...utils.generic import check_model_inputs
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...processing_utils import Unpack
from torch import nn
from typing import Optional, Union
@auto_docstring
class DiffLlamaModel(DiffLlamaPreTrainedModel):
def __init__(self, config: DiffLlamaConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([DiffLlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = DiffLlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = DiffLlamaRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
|
@auto_docstring
class DiffLlamaModel(DiffLlamaPreTrainedModel):
def __init__(self, config: DiffLlamaConfig):
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
pass
| 6
| 0
| 40
| 5
| 30
| 6
| 6
| 0.22
| 1
| 16
| 10
| 0
| 5
| 7
| 6
| 7
| 257
| 34
| 184
| 65
| 146
| 40
| 89
| 34
| 82
| 21
| 2
| 2
| 37
|
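When neither `cache_position` nor `position_ids` is passed, the model record above derives them from how many tokens the cache has already processed. A small sketch of that bookkeeping, with the cache length replaced by a plain integer (an assumption standing in for `past_key_values.get_seq_length()`):
```python
# Hedged sketch: `past_seen_tokens` stands in for past_key_values.get_seq_length();
# the sizes are invented for illustration only.
import torch

past_seen_tokens = 3   # tokens already in the cache
new_tokens = 4         # inputs_embeds.shape[1] for the current step

cache_position = torch.arange(past_seen_tokens, past_seen_tokens + new_tokens)
position_ids = cache_position.unsqueeze(0)

print(cache_position.tolist())  # [3, 4, 5, 6]
print(position_ids.shape)       # torch.Size([1, 4])
```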
2,116
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaPreTrainedModel
|
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...modeling_utils import PreTrainedModel
from .configuration_diffllama import DiffLlamaConfig
@auto_docstring
class DiffLlamaPreTrainedModel(PreTrainedModel):
config: DiffLlamaConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['DiffLlamaDecoderLayer']
_skip_keys_device_placement = ['past_key_values']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = False
_can_compile_fullgraph = True
_supports_attention_backend = False
_can_record_outputs = {'hidden_states': DiffLlamaDecoderLayer, 'attentions': DiffLlamaAttention}
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, DiffLlamaAttention):
module.lambda_q1.data.normal_(0, self.config.lambda_std_dev)
module.lambda_k1.data.normal_(0, self.config.lambda_std_dev)
module.lambda_q2.data.normal_(0, self.config.lambda_std_dev)
module.lambda_k2.data.normal_(0, self.config.lambda_std_dev)
|
@auto_docstring
class DiffLlamaPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 10
| 0
| 10
| 0
| 5
| 0
| 1
| 0
| 0
| 5
| 1
| 0
| 1
| 1
| 24
| 1
| 23
| 15
| 21
| 0
| 22
| 15
| 20
| 5
| 1
| 2
| 5
|
2,117
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaRMSNorm
|
import torch
from ...integrations import use_kernel_forward_from_hub
from torch import nn
@use_kernel_forward_from_hub('RMSNorm')
class DiffLlamaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
DiffLlamaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
|
@use_kernel_forward_from_hub('RMSNorm')
class DiffLlamaRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
DiffLlamaRMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 5
| 1
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 2
| 0
| 0
| 3
| 2
| 3
| 13
| 18
| 2
| 13
| 8
| 9
| 3
| 13
| 8
| 9
| 1
| 1
| 0
| 3
|
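The RMSNorm record above normalizes each position by its root mean square in float32 and then rescales by a learned weight. A free-standing numeric check of the same computation, assuming only torch and arbitrary shapes:
```python
import torch

def rms_norm(hidden_states, weight, eps=1e-6):
    # Mirror of the forward pass in the record: upcast, divide by the RMS, rescale, downcast.
    input_dtype = hidden_states.dtype
    hidden_states = hidden_states.to(torch.float32)
    variance = hidden_states.pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps)
    return weight * hidden_states.to(input_dtype)

x = torch.randn(2, 4, 8, dtype=torch.float16)
out = rms_norm(x, torch.ones(8))
# Each position now has a root-mean-square close to 1.
print(out.float().pow(2).mean(-1).sqrt())
```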
2,118
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaRotaryEmbedding
|
from torch import nn
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
import torch
from .configuration_diffllama import DiffLlamaConfig
class DiffLlamaRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: DiffLlamaConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
|
class DiffLlamaRotaryEmbedding(nn.Module):
def __init__(self, config: DiffLlamaConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5
| 0
| 18
| 2
| 13
| 5
| 3
| 0.35
| 1
| 4
| 1
| 0
| 3
| 7
| 3
| 13
| 59
| 8
| 40
| 21
| 35
| 14
| 38
| 20
| 34
| 3
| 1
| 1
| 8
|
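The rotary-embedding record above builds cos/sin tables from an outer product of inverse frequencies and position ids, then duplicates them along the last axis. A shape-level sketch assuming the default RoPE initialization; `head_dim` and `base` are invented values, not read from any config:
```python
import torch

head_dim, base = 8, 10000.0
# Default RoPE inverse frequencies (assumption: the "default" rope_type of the record).
inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))  # (head_dim/2,)

position_ids = torch.arange(6)[None, :]                              # (batch=1, seq_len=6)
freqs = position_ids[:, :, None].float() * inv_freq[None, None, :]   # (1, 6, head_dim/2)
emb = torch.cat((freqs, freqs), dim=-1)                               # (1, 6, head_dim)
cos, sin = emb.cos(), emb.sin()
print(cos.shape, sin.shape)  # torch.Size([1, 6, 8]) torch.Size([1, 6, 8])
```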
2,119
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modeling_diffllama.py
|
transformers.models.diffllama.modeling_diffllama.DiffLlamaSdpaAttention
|
from typing import Optional, Union
import torch
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache, StaticCache
class DiffLlamaSdpaAttention(DiffLlamaAttention):
"""
DiffLlama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`DiffLlamaAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
the SDPA API.
"""
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)
value_states = value_states.repeat(1, 2, 1, 1)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = causal_mask[:, :, :, :key_states.shape[-2]]
if query_states.device.type == 'cuda' and causal_mask is not None:
query_states = query_states.contiguous()
key_states = key_states.contiguous()
value_states = value_states.contiguous()
is_causal = causal_mask is None and q_len > 1
attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal)
attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(query_states.dtype)
lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(query_states.dtype)
lambda_full = lambda_1 - lambda_2 + self.lambda_init
attn_output = attn_output1 - lambda_full * attn_output2
attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
return (attn_output, None)
|
class DiffLlamaSdpaAttention(DiffLlamaAttention):
'''
DiffLlama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`DiffLlamaAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
the SDPA API.
'''
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| 3
| 1
| 93
| 14
| 73
| 6
| 7
| 0.16
| 1
| 4
| 1
| 0
| 1
| 2
| 1
| 13
| 101
| 15
| 74
| 26
| 61
| 12
| 40
| 15
| 38
| 7
| 2
| 1
| 7
|
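Both the eager and SDPA attention records combine two attention outputs with a learned scalar, lambda_full = exp(sum(q1*k1)) - exp(sum(q2*k2)) + lambda_init, followed by an RMS group norm and a (1 - lambda_init) rescale. The sketch below isolates only that combination step; the shapes are toys and `lambda_init` is a hard-coded constant rather than the value produced by `lambda_init_fn(layer_idx)`.
```python
# Toy shapes; assumes torch >= 2.4 for nn.RMSNorm, matching the groupnorm used in the record.
import torch
from torch import nn

head_dim, lambda_init = 8, 0.8   # lambda_init is an assumed constant here
lambda_q1, lambda_k1 = torch.randn(head_dim) * 0.1, torch.randn(head_dim) * 0.1
lambda_q2, lambda_k2 = torch.randn(head_dim) * 0.1, torch.randn(head_dim) * 0.1

lambda_1 = torch.exp(torch.sum(lambda_q1 * lambda_k1, dim=-1))
lambda_2 = torch.exp(torch.sum(lambda_q2 * lambda_k2, dim=-1))
lambda_full = lambda_1 - lambda_2 + lambda_init

# Stand-ins for the two halves obtained by chunking the doubled attention heads.
attn_output1 = torch.randn(2, 4, 6, 2 * head_dim)
attn_output2 = torch.randn(2, 4, 6, 2 * head_dim)

groupnorm = nn.RMSNorm(2 * head_dim, elementwise_affine=False)
attn_output = attn_output1 - lambda_full * attn_output2
attn_output = (1 - lambda_init) * groupnorm(attn_output)
print(attn_output.shape)  # torch.Size([2, 4, 6, 16])
```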
2,120
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modular_diffllama.py
|
transformers.models.diffllama.modular_diffllama.DiffLlamaAttention
|
import torch
from ...utils.deprecation import deprecate_kwarg
from torch import nn
from ...cache_utils import Cache, StaticCache
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, LlamaPreTrainedModel, apply_rotary_pos_emb, repeat_kv
from typing import Optional
import math
from .configuration_diffllama import DiffLlamaConfig
class DiffLlamaAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: DiffLlamaConfig, layer_idx: Optional[int]=None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
self.attention_dropout = config.attention_dropout
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = getattr(config, 'head_dim', self.hidden_size // self.num_heads)
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.rope_theta = config.rope_theta
self.is_causal = True
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
self.lambda_init = lambda_init_fn(layer_idx)
self.lambda_q1 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
self.lambda_k1 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
self.lambda_q2 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
self.lambda_k2 = nn.Parameter(torch.normal(0, config.lambda_std_dev, size=(self.head_dim,)))
self.groupnorm = nn.RMSNorm(2 * self.head_dim, eps=config.rms_norm_eps, elementwise_affine=False)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, target_len, _ = hidden_states.size()
q_len = target_len
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)
value_states = value_states.repeat(1, 2, 1, 1)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, :key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(query_states.dtype)
lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(query_states.dtype)
lambda_full = lambda_1 - lambda_2 + self.lambda_init
attn_output = torch.matmul(attn_weights, value_states)
attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
attn_output = attn_output1 - lambda_full * attn_output2
attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
|
class DiffLlamaAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: DiffLlamaConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| 4
| 1
| 50
| 8
| 41
| 2
| 3
| 0.06
| 1
| 6
| 2
| 2
| 2
| 21
| 2
| 12
| 104
| 18
| 82
| 49
| 68
| 5
| 63
| 38
| 60
| 4
| 1
| 1
| 6
|
2,121
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modular_diffllama.py
|
transformers.models.diffllama.modular_diffllama.DiffLlamaDecoderLayer
|
from .configuration_diffllama import DiffLlamaConfig
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, LlamaPreTrainedModel, apply_rotary_pos_emb, repeat_kv
class DiffLlamaDecoderLayer(LlamaDecoderLayer):
def __init__(self, config: DiffLlamaConfig, layer_idx: int):
super().__init__(config, layer_idx)
self.self_attn = DIFFLLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
|
class DiffLlamaDecoderLayer(LlamaDecoderLayer):
def __init__(self, config: DiffLlamaConfig, layer_idx: int):
pass
| 2
| 0
| 4
| 1
| 3
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 1
| 1
| 1
| 13
| 5
| 1
| 4
| 3
| 2
| 0
| 4
| 3
| 2
| 1
| 2
| 0
| 1
|
2,122
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modular_diffllama.py
|
transformers.models.diffllama.modular_diffllama.DiffLlamaFlashAttention2
|
from ...modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask
from typing import Optional
from ...cache_utils import Cache, StaticCache
from ...utils.deprecation import deprecate_kwarg
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, LlamaPreTrainedModel, apply_rotary_pos_emb, repeat_kv
import torch
class DiffLlamaFlashAttention2(DiffLlamaAttention):
"""
DiffLlama flash attention module. This module inherits from `DiffLlamaAttention` as the weights of the module stay
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, None]:
if isinstance(past_key_values, StaticCache):
raise ValueError('`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers')
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
if position_embeddings is None:
logger.warning_once('The attention layers in this model are transitioning from computing the RoPE embeddings internally through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed `position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be removed and `position_embeddings` will be mandatory.')
cos, sin = self.rotary_emb(value_states, position_ids)
else:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
dropout_rate = self.attention_dropout if self.training else 0.0
input_dtype = query_states.dtype
device_type = query_states.device.type if query_states.device.type != 'mps' else 'cpu'
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_dtype(device_type) if hasattr(torch, 'get_autocast_dtype') else torch.get_autocast_gpu_dtype()
elif hasattr(self.config, '_pre_quantization_dtype'):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_proj.weight.dtype
logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.')
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
value_states1, value_states2 = torch.chunk(value_states, 2, dim=2)
value_states1 = value_states1.repeat(1, 1, 2, 1)
value_states2 = value_states2.repeat(1, 1, 2, 1)
attn_output1 = _flash_attention_forward(query_states, key_states, value_states1, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, 'sliding_window', None), use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal)
attn_output2 = _flash_attention_forward(query_states, key_states, value_states2, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, 'sliding_window', None), use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal)
attn_output = torch.cat([attn_output1, attn_output2], dim=-1)
attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=2)
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(query_states.dtype)
lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(query_states.dtype)
lambda_full = lambda_1 - lambda_2 + self.lambda_init
attn_output = attn_output1 - lambda_full * attn_output2
attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, None)
|
class DiffLlamaFlashAttention2(DiffLlamaAttention):
'''
DiffLlama flash attention module. This module inherits from `DiffLlamaAttention` as the weights of the module stay
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
'''
def __init__(self, *args, **kwargs):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None) -> tuple[torch.Tensor, None]:
pass
| 4
| 1
| 70
| 11
| 52
| 8
| 5
| 0.19
| 1
| 6
| 2
| 0
| 2
| 3
| 2
| 14
| 148
| 23
| 105
| 31
| 92
| 20
| 54
| 21
| 51
| 9
| 2
| 2
| 10
|
2,123
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modular_diffllama.py
|
transformers.models.diffllama.modular_diffllama.DiffLlamaForCausalLM
|
from ..gemma.modeling_gemma import GemmaForCausalLM
class DiffLlamaForCausalLM(GemmaForCausalLM):
pass
|
class DiffLlamaForCausalLM(GemmaForCausalLM):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
2,124
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modular_diffllama.py
|
transformers.models.diffllama.modular_diffllama.DiffLlamaForQuestionAnswering
|
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, LlamaPreTrainedModel, apply_rotary_pos_emb, repeat_kv
class DiffLlamaForQuestionAnswering(LlamaForQuestionAnswering):
pass
|
class DiffLlamaForQuestionAnswering(LlamaForQuestionAnswering):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
2,125
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modular_diffllama.py
|
transformers.models.diffllama.modular_diffllama.DiffLlamaForSequenceClassification
|
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, LlamaPreTrainedModel, apply_rotary_pos_emb, repeat_kv
class DiffLlamaForSequenceClassification(LlamaForSequenceClassification):
pass
|
class DiffLlamaForSequenceClassification(LlamaForSequenceClassification):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
2,126
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modular_diffllama.py
|
transformers.models.diffllama.modular_diffllama.DiffLlamaForTokenClassification
|
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, LlamaPreTrainedModel, apply_rotary_pos_emb, repeat_kv
class DiffLlamaForTokenClassification(LlamaForTokenClassification):
pass
|
class DiffLlamaForTokenClassification(LlamaForTokenClassification):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
2,127
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modular_diffllama.py
|
transformers.models.diffllama.modular_diffllama.DiffLlamaMLP
|
from ..mistral.modeling_mistral import MistralMLP
class DiffLlamaMLP(MistralMLP):
pass
|
class DiffLlamaMLP(MistralMLP):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
2,128
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modular_diffllama.py
|
transformers.models.diffllama.modular_diffllama.DiffLlamaModel
|
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, LlamaPreTrainedModel, apply_rotary_pos_emb, repeat_kv
class DiffLlamaModel(LlamaModel):
pass
|
class DiffLlamaModel(LlamaModel):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
2,129
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modular_diffllama.py
|
transformers.models.diffllama.modular_diffllama.DiffLlamaPreTrainedModel
|
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, LlamaPreTrainedModel, apply_rotary_pos_emb, repeat_kv
from ...modeling_utils import PreTrainedModel
class DiffLlamaPreTrainedModel(LlamaPreTrainedModel):
_supports_flex_attn = False
_supports_attention_backend = False
def _init_weights(self, module):
PreTrainedModel._init_weights(self, module)
if isinstance(module, DiffLlamaAttention):
module.lambda_q1.data.normal_(0, self.config.lambda_std_dev)
module.lambda_k1.data.normal_(0, self.config.lambda_std_dev)
module.lambda_q2.data.normal_(0, self.config.lambda_std_dev)
module.lambda_k2.data.normal_(0, self.config.lambda_std_dev)
|
class DiffLlamaPreTrainedModel(LlamaPreTrainedModel):
def _init_weights(self, module):
pass
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 3
| 0
| 3
| 3
| 2
| 0
| 3
| 3
| 2
| 0
| 2
| 0
| 0
|
2,130
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/diffllama/modular_diffllama.py
|
transformers.models.diffllama.modular_diffllama.DiffLlamaSdpaAttention
|
from ...utils.deprecation import deprecate_kwarg
from typing import Optional
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaModel, LlamaPreTrainedModel, apply_rotary_pos_emb, repeat_kv
from ...cache_utils import Cache, StaticCache
import torch
class DiffLlamaSdpaAttention(DiffLlamaAttention):
"""
DiffLlama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`DiffLlamaAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
the SDPA API.
"""
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)
value_states = value_states.repeat(1, 2, 1, 1)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = causal_mask[:, :, :, :key_states.shape[-2]]
if query_states.device.type == 'cuda' and causal_mask is not None:
query_states = query_states.contiguous()
key_states = key_states.contiguous()
value_states = value_states.contiguous()
is_causal = causal_mask is None and q_len > 1
attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal)
attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(query_states.dtype)
lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(query_states.dtype)
lambda_full = lambda_1 - lambda_2 + self.lambda_init
attn_output = attn_output1 - lambda_full * attn_output2
attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
return (attn_output, None)
|
class DiffLlamaSdpaAttention(DiffLlamaAttention):
'''
DiffLlama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`DiffLlamaAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
the SDPA API.
'''
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| 3
| 1
| 93
| 14
| 73
| 6
| 7
| 0.16
| 1
| 4
| 1
| 0
| 1
| 2
| 1
| 13
| 101
| 15
| 74
| 26
| 61
| 12
| 40
| 15
| 38
| 7
| 2
| 1
| 7
|
2,131
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/configuration_dinat.py
|
transformers.models.dinat.configuration_dinat.DinatConfig
|
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
from ...configuration_utils import PretrainedConfig
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`DinatModel`]. It is used to instantiate a Dinat
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Dinat
[shi-labs/dinat-mini-in1k-224](https://huggingface.co/shi-labs/dinat-mini-in1k-224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 64):
Dimensionality of patch embedding.
depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 5]`):
Number of layers in each level of the encoder.
num_heads (`list[int]`, *optional*, defaults to `[2, 4, 8, 16]`):
Number of attention heads in each layer of the Transformer encoder.
kernel_size (`int`, *optional*, defaults to 7):
Neighborhood Attention kernel size.
dilations (`list[list[int]]`, *optional*, defaults to `[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]]`):
Dilation value of each NA layer in the Transformer encoder.
mlp_ratio (`float`, *optional*, defaults to 3.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
layer_scale_init_value (`float`, *optional*, defaults to 0.0):
The initial value for the layer scale. Disabled if <=0.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import DinatConfig, DinatModel
>>> # Initializing a Dinat shi-labs/dinat-mini-in1k-224 style configuration
>>> configuration = DinatConfig()
>>> # Initializing a model (with random weights) from the shi-labs/dinat-mini-in1k-224 style configuration
>>> model = DinatModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'dinat'
attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-05, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
super().__init__(**kwargs)
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.kernel_size = kernel_size
self.dilations = dilations
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
self.layer_scale_init_value = layer_scale_init_value
self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
|
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`DinatModel`]. It is used to instantiate a Dinat
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Dinat
[shi-labs/dinat-mini-in1k-224](https://huggingface.co/shi-labs/dinat-mini-in1k-224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 64):
Dimensionality of patch embedding.
depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 5]`):
Number of layers in each level of the encoder.
num_heads (`list[int]`, *optional*, defaults to `[2, 4, 8, 16]`):
Number of attention heads in each layer of the Transformer encoder.
kernel_size (`int`, *optional*, defaults to 7):
Neighborhood Attention kernel size.
dilations (`list[list[int]]`, *optional*, defaults to `[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]]`):
Dilation value of each NA layer in the Transformer encoder.
mlp_ratio (`float`, *optional*, defaults to 3.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
layer_scale_init_value (`float`, *optional*, defaults to 0.0):
The initial value for the layer scale. Disabled if <=0.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import DinatConfig, DinatModel
>>> # Initializing a Dinat shi-labs/dinat-mini-in1k-224 style configuration
>>> configuration = DinatConfig()
>>> # Initializing a model (with random weights) from the shi-labs/dinat-mini-in1k-224 style configuration
>>> model = DinatModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-05, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
pass
| 2
| 1
| 48
| 1
| 45
| 2
| 1
| 1.24
| 2
| 3
| 0
| 0
| 1
| 21
| 1
| 6
| 125
| 11
| 51
| 45
| 28
| 63
| 25
| 24
| 23
| 1
| 1
| 0
| 1
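Two attributes in the DinatConfig record above are derived rather than passed in: `hidden_size` doubles the embedding dimension once per stage after the first, and `stage_names` lists the stem plus one entry per depth. A quick check using the documented defaults:
```python
embed_dim = 64
depths = [3, 4, 6, 5]

hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]

print(hidden_size)   # 512
print(stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
```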
|
2,132
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatBackbone
|
import torch
from ...utils import ModelOutput, OptionalDependencyNotAvailable, auto_docstring, is_natten_available, logging, requires_backends
from ...utils.backbone_utils import BackboneMixin
from ...modeling_outputs import BackboneOutput
from torch import nn
from typing import Optional, Union
@auto_docstring(custom_intro='\n NAT backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class DinatBackbone(DinatPreTrainedModel, BackboneMixin):
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
requires_backends(self, ['natten'])
self.embeddings = DinatEmbeddings(config)
self.encoder = DinatEncoder(config)
self.num_features = [config.embed_dim] + [int(config.embed_dim * 2 ** i) for i in range(len(config.depths))]
hidden_states_norms = {}
for stage, num_channels in zip(self._out_features, self.channels):
hidden_states_norms[stage] = nn.LayerNorm(num_channels)
self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@auto_docstring
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
>>> model = AutoBackbone.from_pretrained(
... "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 512, 7, 7]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
embedding_output = self.embeddings(pixel_values)
outputs = self.encoder(embedding_output, output_attentions=output_attentions, output_hidden_states=True, output_hidden_states_before_downsampling=True, return_dict=True)
hidden_states = outputs.reshaped_hidden_states
feature_maps = ()
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
batch_size, num_channels, height, width = hidden_state.shape
hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous()
hidden_state = hidden_state.view(batch_size, height * width, num_channels)
hidden_state = self.hidden_states_norms[stage](hidden_state)
hidden_state = hidden_state.view(batch_size, height, width, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_maps += (hidden_state,)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n NAT backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class DinatBackbone(DinatPreTrainedModel, BackboneMixin):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
@auto_docstring
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
'''
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
>>> model = AutoBackbone.from_pretrained(
... "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 512, 7, 7]
```'''
pass
| 6
| 1
| 31
| 6
| 18
| 7
| 4
| 0.38
| 2
| 9
| 3
| 0
| 3
| 4
| 3
| 16
| 99
| 19
| 58
| 24
| 46
| 22
| 38
| 17
| 34
| 9
| 2
| 2
| 12
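Inside the backbone record above, each selected stage output is flattened so that LayerNorm runs over the channel dimension and is then restored to NCHW. A shape-level sketch of that round trip with invented sizes:
```python
import torch
from torch import nn

batch_size, num_channels, height, width = 1, 128, 7, 7
hidden_state = torch.randn(batch_size, num_channels, height, width)   # stage output, NCHW
norm = nn.LayerNorm(num_channels)

hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous()                # NHWC
hidden_state = hidden_state.view(batch_size, height * width, num_channels)  # tokens x channels
hidden_state = norm(hidden_state)
hidden_state = hidden_state.view(batch_size, height, width, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()                # back to NCHW
print(hidden_state.shape)  # torch.Size([1, 128, 7, 7])
```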
|
2,133
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatDownsampler
|
from torch import nn
import torch
class DinatDownsampler(nn.Module):
"""
Convolutional Downsampling Layer.
Args:
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
super().__init__()
self.dim = dim
self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
self.norm = norm_layer(2 * dim)
def forward(self, input_feature: torch.Tensor) -> torch.Tensor:
input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
input_feature = self.norm(input_feature)
return input_feature
|
class DinatDownsampler(nn.Module):
'''
Convolutional Downsampling Layer.
Args:
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
'''
def __init__(self, dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
pass
def forward(self, input_feature: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 5
| 0
| 5
| 0
| 1
| 0.8
| 1
| 3
| 0
| 0
| 2
| 3
| 2
| 12
| 21
| 3
| 10
| 6
| 7
| 8
| 10
| 6
| 7
| 1
| 1
| 0
| 2
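The downsampler record above works on channels-last tensors: it permutes to NCHW for the strided conv, halves the spatial resolution while doubling the channels, and permutes back for the layer norm. A sketch with made-up sizes:
```python
import torch
from torch import nn

dim = 64
reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
norm = nn.LayerNorm(2 * dim)

x = torch.randn(1, 14, 14, dim)                           # (batch, height, width, channels)
y = reduction(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)  # (1, 7, 7, 128)
y = norm(y)
print(y.shape)  # torch.Size([1, 7, 7, 128])
```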
|
2,134
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatDropPath
|
import torch
from typing import Optional, Union
from torch import nn
class DinatDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
|
class DinatDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| 4
| 1
| 2
| 0
| 2
| 0
| 1
| 0.13
| 1
| 4
| 0
| 0
| 3
| 1
| 3
| 13
| 12
| 3
| 8
| 5
| 4
| 1
| 8
| 5
| 4
| 1
| 1
| 0
| 3
|
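The DinatDropPath record above calls a module-level `drop_path` helper that is defined elsewhere in modeling_dinat.py and therefore does not appear in the snippet. The sketch below shows the standard stochastic-depth formulation that such a helper implements; it is included only for context and is not copied from the source file.

```python
import torch

def drop_path(hidden_states: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Randomly zero whole samples in the residual branch, rescaling the survivors."""
    if drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1 - drop_prob
    # one Bernoulli draw per sample, broadcast over all remaining dimensions
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
    random_tensor.floor_()  # 0 or 1 per sample
    return hidden_states.div(keep_prob) * random_tensor
```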
2,135
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatEmbeddings
|
from torch import nn
from typing import Optional, Union
import torch
class DinatEmbeddings(nn.Module):
"""
Construct the patch and position embeddings.
"""
def __init__(self, config):
super().__init__()
self.patch_embeddings = DinatPatchEmbeddings(config)
self.norm = nn.LayerNorm(config.embed_dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor]:
embeddings = self.patch_embeddings(pixel_values)
embeddings = self.norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
|
class DinatEmbeddings(nn.Module):
'''
Construct the patch and position embeddings.
'''
def __init__(self, config):
pass
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor]:
pass
| 3
| 1
| 7
| 2
| 5
| 0
| 1
| 0.27
| 1
| 3
| 1
| 0
| 2
| 3
| 2
| 12
| 20
| 6
| 11
| 7
| 8
| 3
| 11
| 7
| 8
| 1
| 1
| 0
| 2
|
2,136
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatEncoder
|
from torch import nn
from typing import Optional, Union
import torch
class DinatEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.num_levels = len(config.depths)
self.config = config
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu')]
self.levels = nn.ModuleList([DinatStage(config=config, dim=int(config.embed_dim * 2 ** i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], dilations=config.dilations[i_layer], drop_path_rate=dpr[sum(config.depths[:i_layer]):sum(config.depths[:i_layer + 1])], downsample=DinatDownsampler if i_layer < self.num_levels - 1 else None) for i_layer in range(self.num_levels)])
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, DinatEncoderOutput]:
all_hidden_states = () if output_hidden_states else None
all_reshaped_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
for i, layer_module in enumerate(self.levels):
layer_outputs = layer_module(hidden_states, output_attentions)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = layer_outputs[1]
if output_hidden_states and output_hidden_states_before_downsampling:
reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states_before_downsampling,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
elif output_hidden_states and (not output_hidden_states_before_downsampling):
reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
if output_attentions:
all_self_attentions += layer_outputs[2:]
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return DinatEncoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states)
|
class DinatEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, DinatEncoderOutput]:
pass
| 3
| 0
| 33
| 4
| 28
| 2
| 6
| 0.05
| 1
| 10
| 3
| 0
| 2
| 3
| 2
| 12
| 68
| 8
| 57
| 21
| 47
| 3
| 31
| 14
| 28
| 10
| 1
| 2
| 12
|
2,137
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatEncoderOutput
|
from dataclasses import dataclass
from typing import Optional, Union
import torch
from ...utils import ModelOutput, OptionalDependencyNotAvailable, auto_docstring, is_natten_available, logging, requires_backends
@dataclass
@auto_docstring(custom_intro="\n Dinat encoder's outputs, with potential hidden states and attentions.\n ")
class DinatEncoderOutput(ModelOutput):
"""
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Dinat encoder's outputs, with potential hidden states and attentions.\n ")
class DinatEncoderOutput(ModelOutput):
'''
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 5
| 5
| 5
| 4
| 20
| 5
| 5
| 4
| 0
| 1
| 0
| 0
|
2,138
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatForImageClassification
|
from torch import nn
from ...utils import ModelOutput, OptionalDependencyNotAvailable, auto_docstring, is_natten_available, logging, requires_backends
import torch
from typing import Optional, Union
@auto_docstring(custom_intro='\n Dinat Model transformer with an image classification head on top (a linear layer on top of the final hidden state\n of the [CLS] token) e.g. for ImageNet.\n ')
class DinatForImageClassification(DinatPreTrainedModel):
def __init__(self, config):
super().__init__(config)
requires_backends(self, ['natten'])
self.num_labels = config.num_labels
self.dinat = DinatModel(config)
self.classifier = nn.Linear(self.dinat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, DinatImageClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.dinat(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return DinatImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states)
|
@auto_docstring(custom_intro='\n Dinat Model transformer with an image classification head on top (a linear layer on top of the final hidden state\n of the [CLS] token) e.g. for ImageNet.\n ')
class DinatForImageClassification(DinatPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, DinatImageClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5
| 1
| 38
| 6
| 29
| 4
| 7
| 0.12
| 1
| 5
| 2
| 0
| 2
| 3
| 2
| 3
| 85
| 12
| 65
| 20
| 48
| 8
| 33
| 12
| 30
| 12
| 2
| 3
| 14
|
2,139
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatImageClassifierOutput
|
from ...utils import ModelOutput, OptionalDependencyNotAvailable, auto_docstring, is_natten_available, logging, requires_backends
from typing import Optional, Union
import torch
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro='\n Dinat outputs for image classification.\n ')
class DinatImageClassifierOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Dinat outputs for image classification.\n ')
class DinatImageClassifierOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.67
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 5
| 6
| 6
| 5
| 22
| 6
| 6
| 5
| 0
| 1
| 0
| 0
|
2,140
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatIntermediate
|
from torch import nn
import torch
from ...activations import ACT2FN
class DinatIntermediate(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class DinatIntermediate(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 4
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
2,141
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatLayer
|
import torch
from torch import nn
from typing import Optional, Union
class DinatLayer(nn.Module):
def __init__(self, config, dim, num_heads, dilation, drop_path_rate=0.0):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.kernel_size = config.kernel_size
self.dilation = dilation
self.window_size = self.kernel_size * self.dilation
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.attention = NeighborhoodAttentionModule(config, dim, num_heads, kernel_size=self.kernel_size, dilation=self.dilation)
self.drop_path = DinatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.intermediate = DinatIntermediate(config, dim)
self.output = DinatOutput(config, dim)
self.layer_scale_parameters = nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True) if config.layer_scale_init_value > 0 else None
def maybe_pad(self, hidden_states, height, width):
window_size = self.window_size
pad_values = (0, 0, 0, 0, 0, 0)
if height < window_size or width < window_size:
pad_l = pad_t = 0
pad_r = max(0, window_size - width)
pad_b = max(0, window_size - height)
pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return (hidden_states, pad_values)
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]:
batch_size, height, width, channels = hidden_states.size()
shortcut = hidden_states
hidden_states = self.layernorm_before(hidden_states)
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
attention_outputs = self.attention(hidden_states, output_attentions=output_attentions)
attention_output = attention_outputs[0]
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_output = attention_output[:, :height, :width, :].contiguous()
if self.layer_scale_parameters is not None:
attention_output = self.layer_scale_parameters[0] * attention_output
hidden_states = shortcut + self.drop_path(attention_output)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.output(self.intermediate(layer_output))
if self.layer_scale_parameters is not None:
layer_output = self.layer_scale_parameters[1] * layer_output
layer_output = hidden_states + self.drop_path(layer_output)
layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
return layer_outputs
|
class DinatLayer(nn.Module):
def __init__(self, config, dim, num_heads, dilation, drop_path_rate=0.0):
pass
def maybe_pad(self, hidden_states, height, width):
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 4
| 0
| 22
| 4
| 18
| 0
| 3
| 0.02
| 1
| 7
| 4
| 0
| 3
| 11
| 3
| 13
| 69
| 13
| 55
| 33
| 47
| 1
| 45
| 29
| 41
| 5
| 1
| 1
| 10
|
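A small worked example of the `maybe_pad` step in the DinatLayer record above: when the feature map is smaller than `kernel_size * dilation`, it is padded on the right and bottom up to that window size. The numbers used here (5x5 input, window size 7) are assumptions for illustration.

```python
import torch
from torch import nn

hidden_states = torch.randn(1, 5, 5, 64)   # (batch, height, width, channels)
window_size = 7
pad_r = max(0, window_size - hidden_states.shape[2])
pad_b = max(0, window_size - hidden_states.shape[1])
# F.pad consumes pairs from the last dimension backwards: (channels, width, height)
padded = nn.functional.pad(hidden_states, (0, 0, 0, pad_r, 0, pad_b))
print(padded.shape)                         # torch.Size([1, 7, 7, 64])
```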
2,142
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatModel
|
from typing import Optional, Union
from ...utils import ModelOutput, OptionalDependencyNotAvailable, auto_docstring, is_natten_available, logging, requires_backends
from torch import nn
import torch
@auto_docstring
class DinatModel(DinatPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
requires_backends(self, ['natten'])
self.config = config
self.num_levels = len(config.depths)
self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1))
self.embeddings = DinatEmbeddings(config)
self.encoder = DinatEncoder(config)
self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, DinatModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
embedding_output = self.embeddings(pixel_values)
encoder_outputs = self.encoder(embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2))
pooled_output = torch.flatten(pooled_output, 1)
if not return_dict:
output = (sequence_output, pooled_output) + encoder_outputs[1:]
return output
return DinatModelOutput(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states)
|
@auto_docstring
class DinatModel(DinatPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, DinatModelOutput]:
pass
| 7
| 2
| 18
| 3
| 13
| 1
| 3
| 0.08
| 1
| 7
| 3
| 0
| 4
| 7
| 4
| 5
| 83
| 16
| 62
| 25
| 43
| 5
| 35
| 18
| 30
| 7
| 2
| 1
| 12
|
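For the pooling step in the DinatModel record above: the channels-last sequence output is flattened over its spatial dimensions, transposed so channels come first, average-pooled to length one, and flattened to a `(batch, num_features)` vector. A standalone sketch with assumed sizes:

```python
import torch
from torch import nn

sequence_output = torch.randn(2, 7, 7, 512)   # (batch, height, width, num_features)
pooler = nn.AdaptiveAvgPool1d(1)
pooled = pooler(sequence_output.flatten(1, 2).transpose(1, 2))  # (2, 512, 49) -> (2, 512, 1)
pooled = torch.flatten(pooled, 1)
print(pooled.shape)                           # torch.Size([2, 512])
```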
2,143
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatModelOutput
|
from ...utils import ModelOutput, OptionalDependencyNotAvailable, auto_docstring, is_natten_available, logging, requires_backends
import torch
from dataclasses import dataclass
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro="\n Dinat model's outputs that also contains a pooling of the last hidden states.\n ")
class DinatModelOutput(ModelOutput):
"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Dinat model's outputs that also contains a pooling of the last hidden states.\n ")
class DinatModelOutput(ModelOutput):
'''
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.67
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 5
| 6
| 6
| 5
| 22
| 6
| 6
| 5
| 0
| 1
| 0
| 0
|
2,144
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatOutput
|
from torch import nn
import torch
class DinatOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class DinatOutput(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 10
| 1
| 9
| 5
| 6
| 0
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
2,145
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatPatchEmbeddings
|
from typing import Optional, Union
from torch import nn
import torch
class DinatPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
patch_size = config.patch_size
num_channels, hidden_size = (config.num_channels, config.embed_dim)
self.num_channels = num_channels
if patch_size == 4:
pass
else:
raise ValueError('Dinat only supports patch size of 4 at the moment.')
self.projection = nn.Sequential(nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)))
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor:
_, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
embeddings = self.projection(pixel_values)
embeddings = embeddings.permute(0, 2, 3, 1)
return embeddings
|
class DinatPatchEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config):
pass
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor:
pass
| 3
| 1
| 13
| 2
| 11
| 1
| 2
| 0.26
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 34
| 5
| 23
| 9
| 20
| 6
| 17
| 9
| 14
| 2
| 1
| 1
| 4
|
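A quick shape check for the DinatPatchEmbeddings record above: two stride-2 convolutions give an overall patch size of 4, and the result is permuted to channels-last for the Transformer. The embedding dimension (96) and image size (224) below are assumed values, not read from a real configuration.

```python
import torch
from torch import nn

num_channels, embed_dim = 3, 96
projection = nn.Sequential(
    nn.Conv2d(num_channels, embed_dim // 2, kernel_size=3, stride=2, padding=1),
    nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1),
)
pixel_values = torch.randn(1, 3, 224, 224)
embeddings = projection(pixel_values).permute(0, 2, 3, 1)
print(embeddings.shape)   # torch.Size([1, 56, 56, 96]): 224 / 4 = 56 patches per side
```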
2,146
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatPreTrainedModel
|
from torch import nn
from ...utils import ModelOutput, OptionalDependencyNotAvailable, auto_docstring, is_natten_available, logging, requires_backends
from ...modeling_utils import PreTrainedModel
from .configuration_dinat import DinatConfig
@auto_docstring
class DinatPreTrainedModel(PreTrainedModel):
config: DinatConfig
base_model_prefix = 'dinat'
main_input_name = 'pixel_values'
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class DinatPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 11
| 0
| 8
| 3
| 4
| 0.58
| 1
| 0
| 0
| 3
| 1
| 0
| 1
| 1
| 21
| 2
| 12
| 5
| 10
| 7
| 11
| 5
| 9
| 4
| 1
| 2
| 4
|
2,147
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.DinatStage
|
from typing import Optional, Union
from torch import nn
import torch
class DinatStage(nn.Module):
def __init__(self, config, dim, depth, num_heads, dilations, drop_path_rate, downsample):
super().__init__()
self.config = config
self.dim = dim
self.layers = nn.ModuleList([DinatLayer(config=config, dim=dim, num_heads=num_heads, dilation=dilations[i], drop_path_rate=drop_path_rate[i]) for i in range(depth)])
if downsample is not None:
self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm)
else:
self.downsample = None
self.pointing = False
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
_, height, width, _ = hidden_states.size()
for i, layer_module in enumerate(self.layers):
layer_outputs = layer_module(hidden_states, output_attentions)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = hidden_states
if self.downsample is not None:
hidden_states = self.downsample(hidden_states_before_downsampling)
stage_outputs = (hidden_states, hidden_states_before_downsampling)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
|
class DinatStage(nn.Module):
def __init__(self, config, dim, depth, num_heads, dilations, drop_path_rate, downsample):
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 22
| 3
| 19
| 1
| 3
| 0.03
| 1
| 6
| 1
| 0
| 2
| 5
| 2
| 12
| 45
| 6
| 38
| 17
| 31
| 1
| 22
| 13
| 19
| 4
| 1
| 1
| 6
|
2,148
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.NeighborhoodAttention
|
from torch import nn
from typing import Optional, Union
import torch
import math
class NeighborhoodAttention(nn.Module):
def __init__(self, config, dim, num_heads, kernel_size, dilation):
super().__init__()
if dim % num_heads != 0:
raise ValueError(f'The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})')
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.kernel_size = kernel_size
self.dilation = dilation
self.rpb = nn.Parameter(torch.zeros(num_heads, 2 * self.kernel_size - 1, 2 * self.kernel_size - 1))
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
query_layer = query_layer / math.sqrt(self.attention_head_size)
attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, self.dilation)
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, self.dilation)
context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
|
class NeighborhoodAttention(nn.Module):
def __init__(self, config, dim, num_heads, kernel_size, dilation):
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 19
| 4
| 13
| 3
| 2
| 0.21
| 1
| 5
| 0
| 0
| 3
| 10
| 3
| 13
| 60
| 13
| 39
| 27
| 31
| 8
| 33
| 23
| 29
| 2
| 1
| 1
| 5
|
2,149
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.NeighborhoodAttentionModule
|
from typing import Optional, Union
from torch import nn
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
import torch
class NeighborhoodAttentionModule(nn.Module):
def __init__(self, config, dim, num_heads, kernel_size, dilation):
super().__init__()
self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size, dilation)
self.output = NeighborhoodAttentionOutput(config, dim)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
|
class NeighborhoodAttentionModule(nn.Module):
def __init__(self, config, dim, num_heads, kernel_size, dilation):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 10
| 1
| 9
| 1
| 1
| 0.11
| 1
| 6
| 2
| 0
| 3
| 3
| 3
| 13
| 34
| 4
| 28
| 15
| 20
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
2,150
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinat/modeling_dinat.py
|
transformers.models.dinat.modeling_dinat.NeighborhoodAttentionOutput
|
from torch import nn
import torch
class NeighborhoodAttentionOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class NeighborhoodAttentionOutput(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 1
| 4
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 11
| 2
| 9
| 5
| 6
| 0
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
2,151
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/configuration_dinov2.py
|
transformers.models.dinov2.configuration_dinov2.Dinov2Config
|
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
from ...configuration_utils import PretrainedConfig
class Dinov2Config(BackboneConfigMixin, PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Dinov2Model`]. It is used to instantiate an
Dinov2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Dinov2
[google/dinov2-base-patch16-224](https://huggingface.co/google/dinov2-base-patch16-224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`int`, *optional*, defaults to 4):
Ratio of the hidden size of the MLPs relative to the `hidden_size`.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
layerscale_value (`float`, *optional*, defaults to 1.0):
Initial value to use for layer scale.
drop_path_rate (`float`, *optional*, defaults to 0.0):
Stochastic depth rate per sample (when applied in the main path of residual layers).
use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
Whether to use the SwiGLU feedforward neural network.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
apply_layernorm (`bool`, *optional*, defaults to `True`):
Whether to apply layer normalization to the feature maps in case the model is used as backbone.
reshape_hidden_states (`bool`, *optional*, defaults to `True`):
Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
seq_len, hidden_size)`.
use_mask_token (`bool`, *optional*, defaults to `True`):
Whether to use mask_token in embeddings.
Example:
```python
>>> from transformers import Dinov2Config, Dinov2Model
>>> # Initializing a Dinov2 dinov2-base-patch16-224 style configuration
>>> configuration = Dinov2Config()
>>> # Initializing a model (with random weights) from the dinov2-base-patch16-224 style configuration
>>> model = Dinov2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'dinov2'
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=14, num_channels=3, qkv_bias=True, layerscale_value=1.0, drop_path_rate=0.0, use_swiglu_ffn=False, out_features=None, out_indices=None, apply_layernorm=True, reshape_hidden_states=True, use_mask_token=True, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.layerscale_value = layerscale_value
self.drop_path_rate = drop_path_rate
self.use_swiglu_ffn = use_swiglu_ffn
self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, num_hidden_layers + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
self.apply_layernorm = apply_layernorm
self.reshape_hidden_states = reshape_hidden_states
self.use_mask_token = use_mask_token
|
class Dinov2Config(BackboneConfigMixin, PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Dinov2Model`]. It is used to instantiate an
Dinov2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Dinov2
[google/dinov2-base-patch16-224](https://huggingface.co/google/dinov2-base-patch16-224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`int`, *optional*, defaults to 4):
Ratio of the hidden size of the MLPs relative to the `hidden_size`.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
layerscale_value (`float`, *optional*, defaults to 1.0):
Initial value to use for layer scale.
drop_path_rate (`float`, *optional*, defaults to 0.0):
Stochastic depth rate per sample (when applied in the main path of residual layers).
use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
Whether to use the SwiGLU feedforward neural network.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
apply_layernorm (`bool`, *optional*, defaults to `True`):
Whether to apply layer normalization to the feature maps in case the model is used as backbone.
reshape_hidden_states (`bool`, *optional*, defaults to `True`):
Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
seq_len, hidden_size)`.
use_mask_token (`bool`, *optional*, defaults to `True`):
Whether to use mask_token in embeddings.
Example:
```python
>>> from transformers import Dinov2Config, Dinov2Model
>>> # Initializing a Dinov2 dinov2-base-patch16-224 style configuration
>>> configuration = Dinov2Config()
>>> # Initializing a model (with random weights) from the dinov2-base-patch16-224 style configuration
>>> model = Dinov2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=14, num_channels=3, qkv_bias=True, layerscale_value=1.0, drop_path_rate=0.0, use_swiglu_ffn=False, out_features=None, out_indices=None, apply_layernorm=True, reshape_hidden_states=True, use_mask_token=True, **kwargs):
pass
| 2
| 1
| 48
| 1
| 47
| 0
| 1
| 1.37
| 2
| 2
| 0
| 0
| 1
| 21
| 1
| 6
| 126
| 10
| 49
| 46
| 24
| 67
| 24
| 23
| 22
| 1
| 1
| 0
| 1
|
2,152
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/configuration_dinov2.py
|
transformers.models.dinov2.configuration_dinov2.Dinov2OnnxConfig
|
from ...onnx import OnnxConfig
from packaging import version
from collections import OrderedDict
from collections.abc import Mapping
class Dinov2OnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.11')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
@property
def atol_for_validation(self) -> float:
return 0.0001
|
class Dinov2OnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
| 5
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 4
| 0
| 0
| 2
| 0
| 2
| 2
| 14
| 2
| 12
| 6
| 7
| 0
| 6
| 4
| 3
| 1
| 1
| 0
| 2
|
2,153
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2Attention
|
from .configuration_dinov2 import Dinov2Config
import torch
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from typing import Callable, Optional, Union
from torch import nn
class Dinov2Attention(nn.Module):
def __init__(self, config: Dinov2Config):
super().__init__()
self.attention = Dinov2SelfAttention(config)
self.output = Dinov2SelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads: set[int]):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
self_attn_output, _ = self.attention(hidden_states, head_mask)
output = self.output(self_attn_output, hidden_states)
return output
|
class Dinov2Attention(nn.Module):
def __init__(self, config: Dinov2Config):
pass
def prune_heads(self, heads: set[int]):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 4
| 0
| 11
| 1
| 9
| 1
| 1
| 0.1
| 1
| 8
| 3
| 1
| 3
| 3
| 3
| 13
| 37
| 6
| 29
| 16
| 20
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
2,154
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2Backbone
|
import torch
from ...utils.backbone_utils import BackboneMixin
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from typing import Callable, Optional, Union
from torch import nn
from ...utils import TransformersKwargs, auto_docstring, logging, torch_int
@auto_docstring(custom_intro='\n Dinov2 backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class Dinov2Backbone(Dinov2PreTrainedModel, BackboneMixin):
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
self.embeddings = Dinov2Embeddings(config)
self.encoder = Dinov2Encoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.post_init()
def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
return self.embeddings.patch_embeddings
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, **kwargs) -> BackboneOutput:
"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
>>> model = AutoBackbone.from_pretrained(
... "facebook/dinov2-base", out_features=["stage2", "stage5", "stage8", "stage11"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 16, 16]
```"""
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
embedding_output = self.embeddings(pixel_values)
output: BaseModelOutput = self.encoder(embedding_output, output_hidden_states=True)
hidden_states = output.hidden_states
feature_maps = []
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
if self.config.apply_layernorm:
hidden_state = self.layernorm(hidden_state)
if self.config.reshape_hidden_states:
hidden_state = hidden_state[:, 1:]
batch_size, _, height, width = pixel_values.shape
patch_size = self.config.patch_size
hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_maps.append(hidden_state)
return BackboneOutput(feature_maps=tuple(feature_maps), hidden_states=hidden_states if output_hidden_states else None)
|
@auto_docstring(custom_intro='\n Dinov2 backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class Dinov2Backbone(Dinov2PreTrainedModel, BackboneMixin):
def __init__(self, config):
pass
def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
pass
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, **kwargs) -> BackboneOutput:
'''
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
>>> model = AutoBackbone.from_pretrained(
... "facebook/dinov2-base", out_features=["stage2", "stage5", "stage8", "stage11"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 16, 16]
```'''
pass
| 7
| 1
| 29
| 5
| 17
| 8
| 5
| 0.43
| 2
| 9
| 4
| 1
| 3
| 4
| 3
| 16
| 93
| 17
| 53
| 23
| 41
| 23
| 36
| 16
| 32
| 13
| 2
| 3
| 15
|
2,155
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2DropPath
|
from torch import nn
from typing import Callable, Optional, Union
import torch
class Dinov2DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
|
class Dinov2DropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| 4
| 1
| 2
| 0
| 2
| 0
| 1
| 0.13
| 1
| 4
| 0
| 0
| 3
| 1
| 3
| 13
| 12
| 3
| 8
| 5
| 4
| 1
| 8
| 5
| 4
| 1
| 1
| 0
| 3
|
2,156
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2Embeddings
|
from .configuration_dinov2 import Dinov2Config
from torch import nn
from ...utils import TransformersKwargs, auto_docstring, logging, torch_int
import torch
from typing import Callable, Optional, Union
class Dinov2Embeddings(nn.Module):
"""
Construct the CLS token, mask token, position and patch embeddings.
"""
def __init__(self, config: Dinov2Config) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
if config.use_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
self.patch_embeddings = Dinov2PatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.use_mask_token = config.use_mask_token
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing and interpolation at torch.float32 precision.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
target_dtype = patch_pos_embed.dtype
patch_pos_embed = nn.functional.interpolate(patch_pos_embed.to(torch.float32), size=(new_height, new_width), mode='bicubic', align_corners=False).to(dtype=target_dtype)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor]=None) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
target_dtype = self.patch_embeddings.projection.weight.dtype
embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
if bool_masked_pos is not None and self.use_mask_token:
embeddings = torch.where(bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings)
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
embeddings = self.dropout(embeddings)
return embeddings
|
class Dinov2Embeddings(nn.Module):
'''
Construct the CLS token, mask token, position and patch embeddings.
'''
def __init__(self, config: Dinov2Config) -> None:
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows interpolating the pre-trained position encodings so the model can be used on higher resolution
images. This method is also adapted to support torch.jit tracing and interpolation at torch.float32 precision.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 4
| 2
| 23
| 5
| 15
| 3
| 2
| 0.28
| 1
| 5
| 2
| 0
| 3
| 7
| 3
| 13
| 77
| 18
| 46
| 25
| 42
| 13
| 39
| 25
| 35
| 2
| 1
| 1
| 5
|
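The `interpolate_pos_encoding` method in the record above resizes the pre-trained position grid so a checkpoint trained at one resolution can embed larger images. A minimal standalone sketch of the same reshape-and-bicubic-resize step, independent of the Dinov2 classes (the function name and the 16x16 grid / 768-dim sizes are illustrative assumptions, not taken from this record):

```python
import torch
from torch import nn

def resize_position_grid(pos_embed: torch.Tensor, new_h: int, new_w: int) -> torch.Tensor:
    # pos_embed: (1, 1 + N, D) with a leading CLS position and N = grid * grid patch positions
    cls_pos, patch_pos = pos_embed[:, :1], pos_embed[:, 1:]
    dim = pos_embed.shape[-1]
    grid = int(patch_pos.shape[1] ** 0.5)
    # fold the flat patch positions back into a 2D grid, resize in float32, flatten again
    patch_pos = patch_pos.reshape(1, grid, grid, dim).permute(0, 3, 1, 2)
    patch_pos = nn.functional.interpolate(
        patch_pos.float(), size=(new_h, new_w), mode="bicubic", align_corners=False
    ).to(pos_embed.dtype)
    patch_pos = patch_pos.permute(0, 2, 3, 1).reshape(1, new_h * new_w, dim)
    return torch.cat((cls_pos, patch_pos), dim=1)

# e.g. a checkpoint with a 16x16 patch grid, re-used for a 37x37 grid of patches
pos = torch.randn(1, 1 + 16 * 16, 768)
print(resize_position_grid(pos, 37, 37).shape)  # torch.Size([1, 1370, 768])
```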
2,157
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2Encoder
|
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from .configuration_dinov2 import Dinov2Config
from torch import nn
import torch
from typing import Callable, Optional, Union
class Dinov2Encoder(nn.Module):
def __init__(self, config: Dinov2Config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([Dinov2Layer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_hidden_states: bool=False) -> BaseModelOutput:
all_hidden_states = [hidden_states] if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
layer_head_mask = head_mask[i] if head_mask is not None else None
hidden_states = layer_module(hidden_states, layer_head_mask)
if all_hidden_states:
all_hidden_states.append(hidden_states)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=tuple(all_hidden_states) if all_hidden_states else None)
|
class Dinov2Encoder(nn.Module):
def __init__(self, config: Dinov2Config):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_hidden_states: bool=False) -> BaseModelOutput:
pass
| 3
| 0
| 24
| 4
| 20
| 0
| 6
| 0
| 1
| 9
| 3
| 1
| 2
| 3
| 2
| 12
| 49
| 8
| 41
| 18
| 31
| 0
| 24
| 11
| 21
| 10
| 1
| 2
| 11
|
2,158
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2ForImageClassification
|
from ...processing_utils import Unpack
from torch import nn
from ...utils.generic import can_return_tuple, check_model_inputs
from .configuration_dinov2 import Dinov2Config
from ...utils import TransformersKwargs, auto_docstring, logging, torch_int
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from typing import Callable, Optional, Union
import torch
@auto_docstring(custom_intro='\n Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state\n of the [CLS] token) e.g. for ImageNet.\n ')
class Dinov2ForImageClassification(Dinov2PreTrainedModel):
def __init__(self, config: Dinov2Config) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.dinov2 = Dinov2Model(config)
self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> ImageClassifierOutput:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPooling = self.dinov2(pixel_values, head_mask=head_mask, **kwargs)
sequence_output = outputs.last_hidden_state
cls_token = sequence_output[:, 0]
patch_tokens = sequence_output[:, 1:]
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
logits = self.classifier(linear_input)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state\n of the [CLS] token) e.g. for ImageNet.\n ')
class Dinov2ForImageClassification(Dinov2PreTrainedModel):
def __init__(self, config: Dinov2Config) -> None:
pass
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> ImageClassifierOutput:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 6
| 1
| 41
| 6
| 31
| 5
| 7
| 0.14
| 1
| 8
| 3
| 1
| 2
| 3
| 2
| 3
| 91
| 13
| 69
| 24
| 51
| 10
| 36
| 15
| 33
| 12
| 2
| 3
| 14
|
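The classification head in the record above concatenates the CLS token with the mean of the patch tokens, which is why its linear layer takes `hidden_size * 2` input features. A shape-only sketch of that step (the batch, sequence, and label sizes are assumed, base-model-like values for illustration):

```python
import torch
from torch import nn

batch, seq_len, hidden_size, num_labels = 2, 257, 768, 1000  # assumed sizes
sequence_output = torch.randn(batch, seq_len, hidden_size)

cls_token = sequence_output[:, 0]                  # (2, 768)
patch_tokens = sequence_output[:, 1:]              # (2, 256, 768)
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)  # (2, 1536)

classifier = nn.Linear(hidden_size * 2, num_labels)
print(classifier(linear_input).shape)              # torch.Size([2, 1000])
```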
2,159
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2Layer
|
from ...modeling_layers import GradientCheckpointingLayer
import torch
from .configuration_dinov2 import Dinov2Config
from typing import Callable, Optional, Union
from torch import nn
class Dinov2Layer(GradientCheckpointingLayer):
"""This corresponds to the Block class in the original implementation."""
def __init__(self, config: Dinov2Config) -> None:
super().__init__()
self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = Dinov2Attention(config)
self.layer_scale1 = Dinov2LayerScale(config)
self.drop_path = Dinov2DropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if config.use_swiglu_ffn:
self.mlp = Dinov2SwiGLUFFN(config)
else:
self.mlp = Dinov2MLP(config)
self.layer_scale2 = Dinov2LayerScale(config)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
hidden_states_norm = self.norm1(hidden_states)
self_attention_output = self.attention(hidden_states_norm, head_mask)
self_attention_output = self.layer_scale1(self_attention_output)
hidden_states = self.drop_path(self_attention_output) + hidden_states
layer_output = self.norm2(hidden_states)
layer_output = self.mlp(layer_output)
layer_output = self.layer_scale2(layer_output)
layer_output = self.drop_path(layer_output) + hidden_states
return layer_output
|
class Dinov2Layer(GradientCheckpointingLayer):
'''This corresponds to the Block class in the original implementation.'''
def __init__(self, config: Dinov2Config) -> None:
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 3
| 1
| 23
| 5
| 17
| 3
| 2
| 0.18
| 1
| 8
| 5
| 0
| 2
| 7
| 2
| 12
| 49
| 11
| 34
| 19
| 26
| 6
| 24
| 14
| 21
| 3
| 1
| 1
| 4
|
2,160
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2LayerScale
|
import torch
from torch import nn
class Dinov2LayerScale(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
return hidden_state * self.lambda1
|
class Dinov2LayerScale(nn.Module):
def __init__(self, config) -> None:
pass
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 1
| 2
| 12
| 7
| 1
| 6
| 4
| 3
| 0
| 6
| 4
| 3
| 1
| 1
| 0
| 2
|
2,161
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2MLP
|
from torch import nn
from ...activations import ACT2FN
import torch
class Dinov2MLP(nn.Module):
def __init__(self, config) -> None:
super().__init__()
in_features = out_features = config.hidden_size
hidden_features = int(config.hidden_size * config.mlp_ratio)
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
if isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.fc1(hidden_state)
hidden_state = self.activation(hidden_state)
hidden_state = self.fc2(hidden_state)
return hidden_state
|
class Dinov2MLP(nn.Module):
def __init__(self, config) -> None:
pass
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 8
| 0
| 8
| 0
| 2
| 0
| 1
| 4
| 0
| 0
| 2
| 3
| 2
| 12
| 17
| 1
| 16
| 8
| 13
| 0
| 15
| 8
| 12
| 2
| 1
| 1
| 3
|
2,162
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2Model
|
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
import torch
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from torch import nn
from .configuration_dinov2 import Dinov2Config
from ...utils import TransformersKwargs, auto_docstring, logging, torch_int
@auto_docstring
class Dinov2Model(Dinov2PreTrainedModel):
def __init__(self, config: Dinov2Config):
super().__init__(config)
self.config = config
self.embeddings = Dinov2Embeddings(config)
self.encoder = Dinov2Encoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.post_init()
def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, **kwargs) -> BaseModelOutputWithPooling:
"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
pre-training.
"""
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
encoder_outputs: BaseModelOutput = self.encoder(embedding_output, head_mask=head_mask, output_hidden_states=output_hidden_states)
sequence_output = encoder_outputs.last_hidden_state
sequence_output = self.layernorm(sequence_output)
pooled_output = sequence_output[:, 0, :]
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
|
@auto_docstring
class Dinov2Model(Dinov2PreTrainedModel):
def __init__(self, config: Dinov2Config):
pass
def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
pass
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, **kwargs) -> BaseModelOutputWithPooling:
'''
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
pre-training.
'''
pass
| 8
| 2
| 17
| 2
| 12
| 3
| 3
| 0.17
| 1
| 10
| 5
| 1
| 4
| 4
| 4
| 5
| 80
| 12
| 58
| 24
| 37
| 10
| 29
| 15
| 24
| 6
| 2
| 1
| 10
|
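A short usage sketch for the model class in the record above. The `facebook/dinov2-base` checkpoint name is an assumption (the commonly published weights), and the random tensor stands in for a real preprocessed image:

```python
import torch
from transformers import Dinov2Model

# assumption: any published Dinov2 checkpoint works; facebook/dinov2-base is used for illustration
model = Dinov2Model.from_pretrained("facebook/dinov2-base")
model.eval()

pixel_values = torch.randn(1, 3, 224, 224)  # stand-in for AutoImageProcessor output
with torch.no_grad():
    outputs = model(pixel_values=pixel_values)

# last_hidden_state: (batch, 1 + num_patches, hidden_size); pooler_output is the layer-normed CLS token
print(outputs.last_hidden_state.shape, outputs.pooler_output.shape)
```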
2,163
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2PatchEmbeddings
|
from torch import nn
import collections.abc
import torch
class Dinov2PatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = (config.image_size, config.patch_size)
num_channels, hidden_size = (config.num_channels, config.hidden_size)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(f'Make sure that the channel dimension of the pixel values match with the one set in the configuration. Expected {self.num_channels} but got {num_channels}.')
embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
return embeddings
|
class Dinov2PatchEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config):
pass
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 12
| 1
| 11
| 0
| 3
| 0.23
| 1
| 4
| 0
| 1
| 2
| 5
| 2
| 12
| 31
| 4
| 22
| 13
| 19
| 5
| 19
| 13
| 16
| 3
| 1
| 1
| 5
|
2,164
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2PreTrainedModel
|
from ...utils import TransformersKwargs, auto_docstring, logging, torch_int
from .configuration_dinov2 import Dinov2Config
from torch import nn
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from typing import Callable, Optional, Union
@auto_docstring
class Dinov2PreTrainedModel(PreTrainedModel):
config: Dinov2Config
base_model_prefix = 'dinov2'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = ['Dinov2Layer']
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'attentions': Dinov2SelfAttention}
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.weight.dtype)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, Dinov2Embeddings):
module.position_embeddings.data = nn.init.trunc_normal_(module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.position_embeddings.dtype)
module.cls_token.data = nn.init.trunc_normal_(module.cls_token.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.cls_token.dtype)
if self.config.use_mask_token:
module.mask_token.data.zero_()
elif isinstance(module, Dinov2LayerScale):
module.lambda1.data.fill_(self.config.layerscale_value)
|
@auto_docstring
class Dinov2PreTrainedModel(PreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
'''Initialize the weights'''
pass
| 3
| 1
| 25
| 1
| 21
| 3
| 5
| 0.25
| 1
| 1
| 1
| 4
| 1
| 0
| 1
| 1
| 38
| 3
| 28
| 8
| 26
| 7
| 16
| 8
| 14
| 5
| 1
| 2
| 5
|
2,165
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2SelfAttention
|
import torch
from typing import Callable, Optional, Union
from torch import nn
from .configuration_dinov2 import Dinov2Config
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
class Dinov2SelfAttention(nn.Module):
def __init__(self, config: Dinov2Config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.')
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout_prob = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size ** (-0.5)
self.is_causal = False
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor]:
batch_size = hidden_states.shape[0]
new_shape = (batch_size, -1, self.num_attention_heads, self.attention_head_size)
key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(self, query_layer, key_layer, value_layer, head_mask, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
return (context_layer, attention_probs)
|
class Dinov2SelfAttention(nn.Module):
def __init__(self, config: Dinov2Config):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 3
| 0
| 18
| 4
| 12
| 2
| 2
| 0.13
| 1
| 6
| 1
| 1
| 3
| 7
| 3
| 13
| 58
| 15
| 38
| 23
| 32
| 5
| 33
| 21
| 29
| 3
| 1
| 1
| 6
|
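`eager_attention_forward` is referenced by the attention module above but is not part of this record. A minimal sketch of the standard scaled-dot-product computation such a function conventionally performs, returning the context with the head dimension moved back next to the sequence dimension as the caller's final reshape expects (the function name and signature here are assumptions for illustration, not the library's definition):

```python
import torch
from torch import nn

def eager_attention_sketch(query, key, value, head_mask=None, scaling=None, dropout=0.0, training=False):
    # query / key / value: (batch, heads, seq, head_dim)
    scaling = scaling if scaling is not None else query.shape[-1] ** -0.5
    attn_scores = torch.matmul(query, key.transpose(-1, -2)) * scaling
    attn_probs = nn.functional.softmax(attn_scores, dim=-1)
    attn_probs = nn.functional.dropout(attn_probs, p=dropout, training=training)
    if head_mask is not None:
        attn_probs = attn_probs * head_mask
    context = torch.matmul(attn_probs, value)                  # (batch, heads, seq, head_dim)
    return context.transpose(1, 2).contiguous(), attn_probs    # (batch, seq, heads, head_dim)

q = k = v = torch.randn(2, 12, 257, 64)
ctx, probs = eager_attention_sketch(q, k, v, scaling=64 ** -0.5)
print(ctx.shape, probs.shape)  # torch.Size([2, 257, 12, 64]) torch.Size([2, 12, 257, 257])
```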
2,166
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2SelfOutput
|
from .configuration_dinov2 import Dinov2Config
from torch import nn
import torch
class Dinov2SelfOutput(nn.Module):
"""
The residual connection is defined in Dinov2Layer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: Dinov2Config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class Dinov2SelfOutput(nn.Module):
'''
The residual connection is defined in Dinov2Layer instead of here (as is the case with other models), due to the
layernorm applied before each block.
'''
def __init__(self, config: Dinov2Config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 5
| 1
| 4
| 0
| 1
| 0.44
| 1
| 3
| 1
| 0
| 2
| 2
| 2
| 12
| 16
| 3
| 9
| 5
| 6
| 4
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
2,167
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2/modeling_dinov2.py
|
transformers.models.dinov2.modeling_dinov2.Dinov2SwiGLUFFN
|
from torch import nn
import torch
class Dinov2SwiGLUFFN(nn.Module):
def __init__(self, config) -> None:
super().__init__()
in_features = out_features = config.hidden_size
hidden_features = int(config.hidden_size * config.mlp_ratio)
hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True)
self.weights_out = nn.Linear(hidden_features, out_features, bias=True)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.weights_in(hidden_state)
x1, x2 = hidden_state.chunk(2, dim=-1)
hidden = nn.functional.silu(x1) * x2
return self.weights_out(hidden)
|
class Dinov2SwiGLUFFN(nn.Module):
def __init__(self, config) -> None:
pass
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 7
| 1
| 6
| 0
| 1
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 15
| 2
| 13
| 9
| 10
| 0
| 13
| 9
| 10
| 1
| 1
| 0
| 2
|
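The SwiGLU block above first scales the MLP width to roughly two thirds and then rounds up to a multiple of 8. A worked example of that arithmetic (hidden_size=768 and mlp_ratio=4 are assumed, base-model-like values):

```python
hidden_size, mlp_ratio = 768, 4                          # assumed configuration values
mlp_width = int(hidden_size * mlp_ratio)                 # 3072
swiglu_width = (int(mlp_width * 2 / 3) + 7) // 8 * 8     # 2048
print(mlp_width, swiglu_width)

# weights_in therefore maps 768 -> 2 * 2048 (a gate half and a value half),
# silu(gate) * value reduces back to 2048, and weights_out maps 2048 -> 768
```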
2,168
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersAttention
|
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
import torch
from typing import Callable, Optional, Union
from torch import nn
from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig
class Dinov2WithRegistersAttention(nn.Module):
def __init__(self, config: Dinov2WithRegistersConfig):
super().__init__()
self.attention = Dinov2WithRegistersSelfAttention(config)
self.output = Dinov2WithRegistersSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads: set[int]):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
self_attn_output, _ = self.attention(hidden_states, head_mask)
output = self.output(self_attn_output, hidden_states)
return output
|
class Dinov2WithRegistersAttention(nn.Module):
def __init__(self, config: Dinov2WithRegistersConfig):
pass
def prune_heads(self, heads: set[int]):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 4
| 0
| 11
| 1
| 9
| 1
| 1
| 0.1
| 1
| 8
| 3
| 1
| 3
| 3
| 3
| 13
| 37
| 6
| 29
| 16
| 20
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
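The `prune_heads` method above relies on two helpers imported from `transformers.pytorch_utils` to shrink the projection layers in place. A small standalone sketch of what they do to a single linear layer (the head count and head size are assumptions for illustration):

```python
from torch import nn
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer

num_heads, head_size = 12, 64
query = nn.Linear(num_heads * head_size, num_heads * head_size)

# drop heads 0 and 5; `index` selects the output features belonging to the surviving heads
heads, index = find_pruneable_heads_and_indices({0, 5}, num_heads, head_size, set())
pruned_query = prune_linear_layer(query, index)

print(query.weight.shape, pruned_query.weight.shape)  # (768, 768) -> (640, 768)
```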
2,169
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersBackbone
|
import torch
from ...utils import TransformersKwargs, auto_docstring, torch_int
from ...utils.generic import can_return_tuple, check_model_inputs
from ...utils.backbone_utils import BackboneMixin
from typing import Callable, Optional, Union
from torch import nn
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
@auto_docstring(custom_intro='\n Dinov2WithRegisters backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class Dinov2WithRegistersBackbone(Dinov2WithRegistersPreTrainedModel, BackboneMixin):
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
self.embeddings = Dinov2WithRegistersEmbeddings(config)
self.encoder = Dinov2WithRegistersEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.num_register_tokens = config.num_register_tokens
self.post_init()
def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
return self.embeddings.patch_embeddings
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, **kwargs) -> BackboneOutput:
"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
>>> model = AutoBackbone.from_pretrained(
... "facebook/dinov2-with-registers-base", out_features=["stage2", "stage5", "stage8", "stage11"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 16, 16]
```"""
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
embedding_output = self.embeddings(pixel_values)
output: BaseModelOutput = self.encoder(embedding_output, output_hidden_states=True)
hidden_states = output.hidden_states
feature_maps = []
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
if self.config.apply_layernorm:
hidden_state = self.layernorm(hidden_state)
if self.config.reshape_hidden_states:
hidden_state = hidden_state[:, 1 + self.num_register_tokens:]
batch_size, _, height, width = pixel_values.shape
patch_size = self.config.patch_size
hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_maps.append(hidden_state)
return BackboneOutput(feature_maps=tuple(feature_maps), hidden_states=hidden_states if output_hidden_states else None)
|
@auto_docstring(custom_intro='\n Dinov2WithRegisters backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class Dinov2WithRegistersBackbone(Dinov2WithRegistersPreTrainedModel, BackboneMixin):
def __init__(self, config):
pass
def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
pass
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, **kwargs) -> BackboneOutput:
'''
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
>>> model = AutoBackbone.from_pretrained(
... "facebook/dinov2-with-registers-base", out_features=["stage2", "stage5", "stage8", "stage11"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 16, 16]
```'''
pass
| 7
| 1
| 31
| 6
| 17
| 8
| 5
| 0.46
| 2
| 9
| 4
| 0
| 3
| 5
| 3
| 16
| 98
| 19
| 54
| 24
| 42
| 25
| 37
| 17
| 33
| 13
| 2
| 3
| 15
|
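The reshape step in the backbone forward above drops the CLS and register tokens and folds the remaining patch tokens back into a 2D feature map. A shape-only sketch of that step (the 224px input, patch size 14, and 4 register tokens are assumptions for illustration):

```python
import torch

batch, hidden_size, num_register_tokens = 2, 768, 4
height = width = 224
patch_size = 14
num_patches = (height // patch_size) * (width // patch_size)   # 16 * 16 = 256

# encoder output layout: [CLS, register_1..register_R, patch_1..patch_N]
hidden_state = torch.randn(batch, 1 + num_register_tokens + num_patches, hidden_size)

patches = hidden_state[:, 1 + num_register_tokens:]             # (2, 256, 768)
feature_map = patches.reshape(batch, height // patch_size, width // patch_size, -1)
feature_map = feature_map.permute(0, 3, 1, 2).contiguous()      # (2, 768, 16, 16)
print(feature_map.shape)
```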
2,170
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersDropPath
|
from typing import Callable, Optional, Union
import torch
from torch import nn
class Dinov2WithRegistersDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
|
class Dinov2WithRegistersDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| 4
| 1
| 2
| 0
| 2
| 0
| 1
| 0.13
| 1
| 4
| 0
| 0
| 3
| 1
| 3
| 13
| 12
| 3
| 8
| 5
| 4
| 1
| 8
| 5
| 4
| 1
| 1
| 0
| 3
|
2,171
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersEmbeddings
|
import torch
from torch import nn
from ...utils import TransformersKwargs, auto_docstring, torch_int
from typing import Callable, Optional, Union
from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig
class Dinov2WithRegistersEmbeddings(nn.Module):
"""
Construct the CLS token, mask token, register tokens, position and patch embeddings.
"""
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size))
self.patch_embeddings = Dinov2WithRegistersPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows interpolating the pre-trained position encodings so the model can be used on higher
resolution images. This implementation supports torch.jit tracing while maintaining backwards compatibility
with the original implementation.
Adapted from:
- https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
- https://github.com/facebookresearch/dinov2/blob/main/dinov2/models/vision_transformer.py
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, 0]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
height = height // self.config.patch_size
width = width // self.config.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
target_dtype = patch_pos_embed.dtype
patch_pos_embed = nn.functional.interpolate(patch_pos_embed.to(dtype=torch.float32), size=(torch_int(height), torch_int(width)), mode='bicubic', align_corners=False, antialias=True).to(dtype=target_dtype)
if not torch.jit.is_tracing():
if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
raise ValueError('Width or height does not match with the interpolated position embeddings')
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor]=None) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
target_dtype = self.patch_embeddings.projection.weight.dtype
embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
if bool_masked_pos is not None:
embeddings = torch.where(bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings)
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
embeddings = torch.cat((embeddings[:, :1], self.register_tokens.expand(embeddings.shape[0], -1, -1), embeddings[:, 1:]), dim=1)
embeddings = self.dropout(embeddings)
return embeddings
|
class Dinov2WithRegistersEmbeddings(nn.Module):
'''
Construct the CLS token, mask token, register tokens, position and patch embeddings.
'''
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows interpolating the pre-trained position encodings so the model can be used on higher
resolution images. This implementation supports torch.jit tracing while maintaining backwards compatibility
with the original implementation.
Adapted from:
- https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
- https://github.com/facebookresearch/dinov2/blob/main/dinov2/models/vision_transformer.py
'''
pass
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 4
| 2
| 30
| 6
| 18
| 7
| 2
| 0.44
| 1
| 6
| 2
| 0
| 3
| 8
| 3
| 13
| 97
| 20
| 54
| 24
| 50
| 24
| 44
| 24
| 40
| 4
| 1
| 2
| 7
|
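The embeddings forward above splices the learned register tokens between the CLS token and the patch tokens after the position encodings have been added, so the registers receive no positional signal. A shape-only sketch of that concatenation (dimensions are illustrative assumptions):

```python
import torch

batch, num_patches, num_registers, dim = 2, 256, 4, 768
embeddings = torch.randn(batch, 1 + num_patches, dim)    # CLS + patches, positions already added
register_tokens = torch.zeros(1, num_registers, dim)      # a learned nn.Parameter in the real module

spliced = torch.cat(
    (
        embeddings[:, :1],                                 # CLS stays first
        register_tokens.expand(batch, -1, -1),             # registers, broadcast over the batch
        embeddings[:, 1:],                                 # patch tokens
    ),
    dim=1,
)
print(spliced.shape)  # torch.Size([2, 261, 768])
```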
2,172
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersEncoder
|
from typing import Callable, Optional, Union
import torch
from torch import nn
from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
class Dinov2WithRegistersEncoder(nn.Module):
def __init__(self, config: Dinov2WithRegistersConfig):
super().__init__()
self.config = config
self.layer = nn.ModuleList([Dinov2WithRegistersLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_hidden_states: bool=False) -> BaseModelOutput:
all_hidden_states = [hidden_states] if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
layer_head_mask = head_mask[i] if head_mask is not None else None
hidden_states = layer_module(hidden_states, layer_head_mask)
if all_hidden_states:
all_hidden_states.append(hidden_states)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=tuple(all_hidden_states) if all_hidden_states else None)
|
class Dinov2WithRegistersEncoder(nn.Module):
def __init__(self, config: Dinov2WithRegistersConfig):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_hidden_states: bool=False) -> BaseModelOutput:
pass
| 3
| 0
| 24
| 4
| 20
| 0
| 6
| 0
| 1
| 9
| 3
| 0
| 2
| 3
| 2
| 12
| 49
| 8
| 41
| 18
| 31
| 0
| 24
| 11
| 21
| 10
| 1
| 2
| 11
|
2,173
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersForImageClassification
|
import torch
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, torch_int
from torch import nn
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig
@auto_docstring(custom_intro='\n Dinov2WithRegisters Model transformer with an image classification head on top (a linear layer on top of the final hidden state\n of the [CLS] token) e.g. for ImageNet.\n ')
class Dinov2WithRegistersForImageClassification(Dinov2WithRegistersPreTrainedModel):
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.dinov2_with_registers = Dinov2WithRegistersModel(config)
self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> ImageClassifierOutput:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPooling = self.dinov2_with_registers(pixel_values, head_mask=head_mask, **kwargs)
sequence_output = outputs.last_hidden_state
cls_token = sequence_output[:, 0]
patch_tokens = sequence_output[:, 1 + self.config.num_register_tokens:]
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
logits = self.classifier(linear_input)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Dinov2WithRegisters Model transformer with an image classification head on top (a linear layer on top of the final hidden state\n of the [CLS] token) e.g. for ImageNet.\n ')
class Dinov2WithRegistersForImageClassification(Dinov2WithRegistersPreTrainedModel):
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
pass
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> ImageClassifierOutput:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 6
| 1
| 41
| 6
| 31
| 5
| 7
| 0.14
| 1
| 8
| 3
| 0
| 2
| 3
| 2
| 3
| 91
| 13
| 69
| 24
| 51
| 10
| 36
| 15
| 33
| 12
| 2
| 3
| 14
|
2,174
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersLayer
|
from torch import nn
from ...modeling_layers import GradientCheckpointingLayer
from typing import Callable, Optional, Union
from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig
import torch
class Dinov2WithRegistersLayer(GradientCheckpointingLayer):
"""This corresponds to the Block class in the original implementation."""
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
super().__init__()
self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = Dinov2WithRegistersAttention(config)
self.layer_scale1 = Dinov2WithRegistersLayerScale(config)
self.drop_path = Dinov2WithRegistersDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if config.use_swiglu_ffn:
self.mlp = Dinov2WithRegistersSwiGLUFFN(config)
else:
self.mlp = Dinov2WithRegistersMLP(config)
self.layer_scale2 = Dinov2WithRegistersLayerScale(config)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
hidden_states_norm = self.norm1(hidden_states)
self_attention_output = self.attention(hidden_states_norm, head_mask)
self_attention_output = self.layer_scale1(self_attention_output)
hidden_states = self.drop_path(self_attention_output) + hidden_states
layer_output = self.norm2(hidden_states)
layer_output = self.mlp(layer_output)
layer_output = self.layer_scale2(layer_output)
layer_output = self.drop_path(layer_output) + hidden_states
return layer_output
|
class Dinov2WithRegistersLayer(GradientCheckpointingLayer):
'''This corresponds to the Block class in the original implementation.'''
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 3
| 1
| 24
| 5
| 18
| 3
| 2
| 0.17
| 1
| 8
| 5
| 0
| 2
| 7
| 2
| 12
| 51
| 11
| 36
| 19
| 28
| 6
| 24
| 14
| 21
| 3
| 1
| 1
| 4
|
2,175
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersLayerScale
|
import torch
from torch import nn
class Dinov2WithRegistersLayerScale(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
return hidden_state * self.lambda1
|
class Dinov2WithRegistersLayerScale(nn.Module):
def __init__(self, config) -> None:
pass
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 1
| 2
| 12
| 7
| 1
| 6
| 4
| 3
| 0
| 6
| 4
| 3
| 1
| 1
| 0
| 2
|
2,176
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersMLP
|
from ...activations import ACT2FN
import torch
from torch import nn
class Dinov2WithRegistersMLP(nn.Module):
def __init__(self, config) -> None:
super().__init__()
in_features = out_features = config.hidden_size
hidden_features = int(config.hidden_size * config.mlp_ratio)
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
if isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.fc1(hidden_state)
hidden_state = self.activation(hidden_state)
hidden_state = self.fc2(hidden_state)
return hidden_state
|
class Dinov2WithRegistersMLP(nn.Module):
def __init__(self, config) -> None:
pass
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 8
| 0
| 8
| 0
| 2
| 0
| 1
| 4
| 0
| 0
| 2
| 3
| 2
| 12
| 17
| 1
| 16
| 8
| 13
| 0
| 15
| 8
| 12
| 2
| 1
| 1
| 3
|
2,177
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersModel
|
from torch import nn
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from typing import Callable, Optional, Union
import torch
from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig
from ...utils.generic import can_return_tuple, check_model_inputs
from ...utils import TransformersKwargs, auto_docstring, torch_int
@auto_docstring
class Dinov2WithRegistersModel(Dinov2WithRegistersPreTrainedModel):
def __init__(self, config: Dinov2WithRegistersConfig):
super().__init__(config)
self.config = config
self.embeddings = Dinov2WithRegistersEmbeddings(config)
self.encoder = Dinov2WithRegistersEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.post_init()
def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, **kwargs) -> BaseModelOutputWithPooling:
"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
pre-training.
"""
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
encoder_outputs: BaseModelOutput = self.encoder(embedding_output, head_mask=head_mask, output_hidden_states=output_hidden_states)
sequence_output = encoder_outputs.last_hidden_state
sequence_output = self.layernorm(sequence_output)
pooled_output = sequence_output[:, 0, :]
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
|
@auto_docstring
class Dinov2WithRegistersModel(Dinov2WithRegistersPreTrainedModel):
def __init__(self, config: Dinov2WithRegistersConfig):
pass
def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
pass
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, **kwargs) -> BaseModelOutputWithPooling:
'''
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
pre-training.
'''
pass
| 8
| 2
| 17
| 2
| 12
| 3
| 3
| 0.17
| 1
| 10
| 5
| 0
| 4
| 4
| 4
| 5
| 80
| 12
| 58
| 24
| 37
| 10
| 29
| 15
| 24
| 6
| 2
| 1
| 10
|
2,178
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersPatchEmbeddings
|
import torch
import collections.abc
from torch import nn
class Dinov2WithRegistersPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = (config.image_size, config.patch_size)
num_channels, hidden_size = (config.num_channels, config.hidden_size)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(f'Make sure that the channel dimension of the pixel values match with the one set in the configuration. Expected {self.num_channels} but got {num_channels}.')
embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
return embeddings
|
class Dinov2WithRegistersPatchEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config):
pass
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 12
| 1
| 11
| 0
| 3
| 0.23
| 1
| 4
| 0
| 0
| 2
| 5
| 2
| 12
| 31
| 4
| 22
| 13
| 19
| 5
| 19
| 13
| 16
| 3
| 1
| 1
| 5
|
2,179
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersPreTrainedModel
|
from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig
from ...utils import TransformersKwargs, auto_docstring, torch_int
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
from typing import Callable, Optional, Union
from torch import nn
@auto_docstring
class Dinov2WithRegistersPreTrainedModel(PreTrainedModel):
config: Dinov2WithRegistersConfig
base_model_prefix = 'dinov2_with_registers'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = ['Dinov2WithRegistersLayer']
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'attentions': Dinov2WithRegistersSelfAttention}
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.weight.dtype)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, Dinov2WithRegistersEmbeddings):
module.position_embeddings.data = nn.init.trunc_normal_(module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.position_embeddings.dtype)
module.cls_token.data = nn.init.trunc_normal_(module.cls_token.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.cls_token.dtype)
module.mask_token.data.zero_()
module.register_tokens.data.zero_()
elif isinstance(module, Dinov2WithRegistersLayerScale):
module.lambda1.data.fill_(self.config.layerscale_value)
|
@auto_docstring
class Dinov2WithRegistersPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
'''Initialize the weights'''
pass
| 3
| 1
| 25
| 1
| 21
| 3
| 5
| 0.25
| 1
| 1
| 1
| 3
| 1
| 0
| 1
| 1
| 38
| 3
| 28
| 8
| 26
| 7
| 16
| 8
| 14
| 5
| 1
| 2
| 5
|
2,180
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersSelfAttention
|
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from typing import Callable, Optional, Union
from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig
from torch import nn
class Dinov2WithRegistersSelfAttention(nn.Module):
def __init__(self, config: Dinov2WithRegistersConfig):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.')
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout_prob = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size ** (-0.5)
self.is_causal = False
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor]:
batch_size = hidden_states.shape[0]
new_shape = (batch_size, -1, self.num_attention_heads, self.attention_head_size)
key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(self, query_layer, key_layer, value_layer, head_mask, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
return (context_layer, attention_probs)
|
class Dinov2WithRegistersSelfAttention(nn.Module):
def __init__(self, config: Dinov2WithRegistersConfig):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 3
| 0
| 18
| 4
| 12
| 2
| 2
| 0.13
| 1
| 6
| 1
| 1
| 3
| 7
| 3
| 13
| 58
| 15
| 38
| 23
| 32
| 5
| 33
| 21
| 29
| 3
| 1
| 1
| 6
|
2,181
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersSelfOutput
|
from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig
import torch
from torch import nn
class Dinov2WithRegistersSelfOutput(nn.Module):
"""
The residual connection is defined in Dinov2WithRegistersLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: Dinov2WithRegistersConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class Dinov2WithRegistersSelfOutput(nn.Module):
'''
The residual connection is defined in Dinov2WithRegistersLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
'''
def __init__(self, config: Dinov2WithRegistersConfig):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 5
| 1
| 4
| 0
| 1
| 0.44
| 1
| 3
| 1
| 0
| 2
| 2
| 2
| 12
| 16
| 3
| 9
| 5
| 6
| 4
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
2,182
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers.Dinov2WithRegistersSwiGLUFFN
|
from torch import nn
import torch
class Dinov2WithRegistersSwiGLUFFN(nn.Module):
def __init__(self, config) -> None:
super().__init__()
in_features = out_features = config.hidden_size
hidden_features = int(config.hidden_size * config.mlp_ratio)
hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True)
self.weights_out = nn.Linear(hidden_features, out_features, bias=True)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.weights_in(hidden_state)
x1, x2 = hidden_state.chunk(2, dim=-1)
hidden = nn.functional.silu(x1) * x2
return self.weights_out(hidden)
|
class Dinov2WithRegistersSwiGLUFFN(nn.Module):
def __init__(self, config) -> None:
pass
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 7
| 1
| 6
| 0
| 1
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 15
| 2
| 13
| 9
| 10
| 0
| 13
| 9
| 10
| 1
| 1
| 0
| 2
|
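The hidden-size arithmetic and the gating in Dinov2WithRegistersSwiGLUFFN above can be checked with a short sketch; hidden_size=768 and mlp_ratio=4 are assumed here (they match the config defaults listed later in this dump).
```python
import torch
from torch import nn

# Worked example of the rounding in __init__: 2/3 of 4 * 768, rounded up to a multiple of 8.
hidden_features = int(768 * 4)                                  # 3072
hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8   # 2048
print(hidden_features)

# The gating: weights_in produces 2 * hidden_features, which is split into two halves
# and combined as silu(x1) * x2 before the output projection.
weights_in = nn.Linear(768, 2 * hidden_features, bias=True)
weights_out = nn.Linear(hidden_features, 768, bias=True)
x = torch.randn(1, 10, 768)
x1, x2 = weights_in(x).chunk(2, dim=-1)
out = weights_out(nn.functional.silu(x1) * x2)
print(out.shape)  # torch.Size([1, 10, 768])
```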
2,183
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modular_dinov2_with_registers.Dinov2WithRegistersBackbone
|
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from typing import Optional, Union
import torch
from ....transformers.models.dinov2.modeling_dinov2 import Dinov2Backbone, Dinov2Encoder, Dinov2ForImageClassification, Dinov2Model, Dinov2PatchEmbeddings, Dinov2PreTrainedModel
from torch import nn
class Dinov2WithRegistersBackbone(Dinov2Backbone):
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.num_register_tokens = config.num_register_tokens
self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
self.embeddings = Dinov2WithRegistersEmbeddings(config)
self.encoder = Dinov2WithRegistersEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.post_init()
def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
return self.embeddings.patch_embeddings
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, **kwargs) -> BackboneOutput:
"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
>>> model = AutoBackbone.from_pretrained(
... "facebook/dinov2-with-registers-base", out_features=["stage2", "stage5", "stage8", "stage11"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 16, 16]
```"""
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
embedding_output = self.embeddings(pixel_values)
output: BaseModelOutput = self.encoder(embedding_output, output_hidden_states=True)
hidden_states = output.hidden_states
feature_maps = []
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
if self.config.apply_layernorm:
hidden_state = self.layernorm(hidden_state)
if self.config.reshape_hidden_states:
hidden_state = hidden_state[:, 1 + self.num_register_tokens:]
batch_size, _, height, width = pixel_values.shape
patch_size = self.config.patch_size
hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_maps.append(hidden_state)
return BackboneOutput(feature_maps=tuple(feature_maps), hidden_states=hidden_states if output_hidden_states else None)
|
class Dinov2WithRegistersBackbone(Dinov2Backbone):
def __init__(self, config):
pass
def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
pass
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, **kwargs) -> BackboneOutput:
'''
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
>>> model = AutoBackbone.from_pretrained(
... "facebook/dinov2-with-registers-base", out_features=["stage2", "stage5", "stage8", "stage11"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 16, 16]
```'''
pass
| 4
| 1
| 30
| 5
| 17
| 8
| 5
| 0.44
| 1
| 9
| 4
| 0
| 3
| 5
| 3
| 19
| 92
| 17
| 52
| 23
| 42
| 23
| 37
| 17
| 33
| 13
| 3
| 3
| 15
|
2,184
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modular_dinov2_with_registers.Dinov2WithRegistersConfig
|
from ...configuration_utils import PretrainedConfig
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
class Dinov2WithRegistersConfig(BackboneConfigMixin, PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Dinov2WithRegistersModel`]. It is used to instantiate an
Dinov2WithRegisters model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the DINOv2 with Registers
[facebook/dinov2-with-registers-base](https://huggingface.co/facebook/dinov2-with-registers-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`int`, *optional*, defaults to 4):
Ratio of the hidden size of the MLPs relative to the `hidden_size`.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
layerscale_value (`float`, *optional*, defaults to 1.0):
Initial value to use for layer scale.
drop_path_rate (`float`, *optional*, defaults to 0.0):
Stochastic depth rate per sample (when applied in the main path of residual layers).
use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
Whether to use the SwiGLU feedforward neural network.
num_register_tokens (`int`, *optional*, defaults to 4):
Number of register tokens to use.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
apply_layernorm (`bool`, *optional*, defaults to `True`):
Whether to apply layer normalization to the feature maps in case the model is used as backbone.
reshape_hidden_states (`bool`, *optional*, defaults to `True`):
Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
seq_len, hidden_size)`.
Example:
```python
>>> from transformers import Dinov2WithRegistersConfig, Dinov2WithRegistersModel
>>> # Initializing a Dinov2WithRegisters base style configuration
>>> configuration = Dinov2WithRegistersConfig()
>>> # Initializing a model (with random weights) from the base style configuration
>>> model = Dinov2WithRegistersModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'dinov2_with_registers'
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, layerscale_value=1.0, drop_path_rate=0.0, use_swiglu_ffn=False, num_register_tokens=4, out_features=None, out_indices=None, apply_layernorm=True, reshape_hidden_states=True, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.layerscale_value = layerscale_value
self.drop_path_rate = drop_path_rate
self.use_swiglu_ffn = use_swiglu_ffn
self.num_register_tokens = num_register_tokens
self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, num_hidden_layers + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
self.apply_layernorm = apply_layernorm
self.reshape_hidden_states = reshape_hidden_states
|
class Dinov2WithRegistersConfig(BackboneConfigMixin, PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Dinov2WithRegistersModel`]. It is used to instantiate an
Dinov2WithRegisters model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the DINOv2 with Registers
[facebook/dinov2-with-registers-base](https://huggingface.co/facebook/dinov2-with-registers-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`int`, *optional*, defaults to 4):
Ratio of the hidden size of the MLPs relative to the `hidden_size`.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
layerscale_value (`float`, *optional*, defaults to 1.0):
Initial value to use for layer scale.
drop_path_rate (`float`, *optional*, defaults to 0.0):
Stochastic depth rate per sample (when applied in the main path of residual layers).
use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
Whether to use the SwiGLU feedforward neural network.
num_register_tokens (`int`, *optional*, defaults to 4):
Number of register tokens to use.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
apply_layernorm (`bool`, *optional*, defaults to `True`):
Whether to apply layer normalization to the feature maps in case the model is used as backbone.
reshape_hidden_states (`bool`, *optional*, defaults to `True`):
Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
seq_len, hidden_size)`.
Example:
```python
>>> from transformers import Dinov2WithRegistersConfig, Dinov2WithRegistersModel
>>> # Initializing a Dinov2WithRegisters base style configuration
>>> configuration = Dinov2WithRegistersConfig()
>>> # Initializing a model (with random weights) from the base style configuration
>>> model = Dinov2WithRegistersModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, layerscale_value=1.0, drop_path_rate=0.0, use_swiglu_ffn=False, num_register_tokens=4, out_features=None, out_indices=None, apply_layernorm=True, reshape_hidden_states=True, **kwargs):
pass
| 2
| 1
| 50
| 1
| 49
| 0
| 1
| 1.35
| 2
| 2
| 0
| 0
| 1
| 22
| 1
| 38
| 130
| 10
| 51
| 48
| 25
| 69
| 25
| 24
| 23
| 1
| 2
| 0
| 1
|
2,185
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modular_dinov2_with_registers.Dinov2WithRegistersEmbeddings
|
from torch import nn
import torch
from ...utils import TransformersKwargs, logging, torch_int
from typing import Optional, Union
class Dinov2WithRegistersEmbeddings(nn.Module):
"""
Construct the CLS token, mask token, register tokens, position and patch embeddings.
"""
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size))
self.patch_embeddings = Dinov2WithRegistersPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
images. This implementation supports torch.jit tracing while maintaining backwards compatibility
with the original implementation.
Adapted from:
- https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
- https://github.com/facebookresearch/dinov2/blob/main/dinov2/models/vision_transformer.py
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, 0]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
height = height // self.config.patch_size
width = width // self.config.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
target_dtype = patch_pos_embed.dtype
patch_pos_embed = nn.functional.interpolate(patch_pos_embed.to(dtype=torch.float32), size=(torch_int(height), torch_int(width)), mode='bicubic', align_corners=False, antialias=True).to(dtype=target_dtype)
if not torch.jit.is_tracing():
if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
raise ValueError('Width or height does not match with the interpolated position embeddings')
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor]=None) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
target_dtype = self.patch_embeddings.projection.weight.dtype
embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
if bool_masked_pos is not None:
embeddings = torch.where(bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings)
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
embeddings = torch.cat((embeddings[:, :1], self.register_tokens.expand(embeddings.shape[0], -1, -1), embeddings[:, 1:]), dim=1)
embeddings = self.dropout(embeddings)
return embeddings
|
class Dinov2WithRegistersEmbeddings(nn.Module):
'''
Construct the CLS token, mask token, register tokens, position and patch embeddings.
'''
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
images. This implementation supports torch.jit tracing while maintaining backwards compatibility
with the original implementation.
Adapted from:
- https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
- https://github.com/facebookresearch/dinov2/blob/main/dinov2/models/vision_transformer.py
'''
pass
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 4
| 2
| 30
| 6
| 18
| 7
| 2
| 0.44
| 1
| 6
| 2
| 0
| 3
| 8
| 3
| 13
| 97
| 20
| 54
| 24
| 50
| 24
| 44
| 24
| 40
| 4
| 1
| 2
| 7
|
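A small shape sketch of how Dinov2WithRegistersEmbeddings.forward above splices the register tokens between the CLS token and the patch tokens; all sizes below are hypothetical.
```python
import torch

# Hypothetical sizes: batch 2, 16 patch tokens, hidden_size 8, 4 register tokens.
batch_size, num_patches, hidden_size, num_register_tokens = 2, 16, 8, 4

# Stand-in for the embeddings after cat(cls, patches) and position-encoding addition.
cls_and_patches = torch.randn(batch_size, 1 + num_patches, hidden_size)
register_tokens = torch.zeros(1, num_register_tokens, hidden_size)

# Same splice as the forward above: [CLS] + register tokens + patch tokens.
embeddings = torch.cat(
    (cls_and_patches[:, :1], register_tokens.expand(batch_size, -1, -1), cls_and_patches[:, 1:]),
    dim=1,
)
print(embeddings.shape)  # torch.Size([2, 21, 8]): 1 CLS + 4 registers + 16 patches
```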
2,186
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modular_dinov2_with_registers.Dinov2WithRegistersEncoder
|
from ....transformers.models.dinov2.modeling_dinov2 import Dinov2Backbone, Dinov2Encoder, Dinov2ForImageClassification, Dinov2Model, Dinov2PatchEmbeddings, Dinov2PreTrainedModel
class Dinov2WithRegistersEncoder(Dinov2Encoder):
pass
|
class Dinov2WithRegistersEncoder(Dinov2Encoder):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
2,187
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modular_dinov2_with_registers.Dinov2WithRegistersForImageClassification
|
from typing import Optional, Union
import torch
from ...processing_utils import Unpack
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ....transformers.models.dinov2.modeling_dinov2 import Dinov2Backbone, Dinov2Encoder, Dinov2ForImageClassification, Dinov2Model, Dinov2PatchEmbeddings, Dinov2PreTrainedModel
from ...utils import TransformersKwargs, logging, torch_int
class Dinov2WithRegistersForImageClassification(Dinov2ForImageClassification):
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> ImageClassifierOutput:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPooling = self.dinov2_with_registers(pixel_values, head_mask=head_mask, **kwargs)
sequence_output = outputs.last_hidden_state
cls_token = sequence_output[:, 0]
patch_tokens = sequence_output[:, 1 + self.config.num_register_tokens:]
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
logits = self.classifier(linear_input)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
class Dinov2WithRegistersForImageClassification(Dinov2ForImageClassification):
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> ImageClassifierOutput:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
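The register-aware pooling in Dinov2WithRegistersForImageClassification.forward above (CLS token concatenated with the mean of the patch tokens, skipping the registers) can be sketched as follows; the shapes are hypothetical.
```python
import torch

# Hypothetical sizes: batch 2, 4 register tokens, 16 patch tokens, hidden_size 8.
batch_size, num_register_tokens, num_patches, hidden_size = 2, 4, 16, 8
sequence_output = torch.randn(batch_size, 1 + num_register_tokens + num_patches, hidden_size)

# Same pooling as the forward above: CLS token concatenated with the mean of the patch tokens
# (register tokens are skipped), giving a 2 * hidden_size feature for the classifier head.
cls_token = sequence_output[:, 0]
patch_tokens = sequence_output[:, 1 + num_register_tokens:]
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
print(linear_input.shape)  # torch.Size([2, 16]) -> 2 * hidden_size
```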
2,188
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modular_dinov2_with_registers.Dinov2WithRegistersModel
|
from ....transformers.models.dinov2.modeling_dinov2 import Dinov2Backbone, Dinov2Encoder, Dinov2ForImageClassification, Dinov2Model, Dinov2PatchEmbeddings, Dinov2PreTrainedModel
class Dinov2WithRegistersModel(Dinov2Model):
pass
|
class Dinov2WithRegistersModel(Dinov2Model):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
2,189
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modular_dinov2_with_registers.Dinov2WithRegistersPatchEmbeddings
|
from ....transformers.models.dinov2.modeling_dinov2 import Dinov2Backbone, Dinov2Encoder, Dinov2ForImageClassification, Dinov2Model, Dinov2PatchEmbeddings, Dinov2PreTrainedModel
class Dinov2WithRegistersPatchEmbeddings(Dinov2PatchEmbeddings):
pass
|
class Dinov2WithRegistersPatchEmbeddings(Dinov2PatchEmbeddings):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
2,190
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py
|
transformers.models.dinov2_with_registers.modular_dinov2_with_registers.Dinov2WithRegistersPreTrainedModel
|
from ....transformers.models.dinov2.modeling_dinov2 import Dinov2Backbone, Dinov2Encoder, Dinov2ForImageClassification, Dinov2Model, Dinov2PatchEmbeddings, Dinov2PreTrainedModel
from torch import nn
import torch
from typing import Optional, Union
class Dinov2WithRegistersPreTrainedModel(Dinov2PreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.weight.dtype)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, Dinov2WithRegistersEmbeddings):
module.position_embeddings.data = nn.init.trunc_normal_(module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.position_embeddings.dtype)
module.cls_token.data = nn.init.trunc_normal_(module.cls_token.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.cls_token.dtype)
module.mask_token.data.zero_()
module.register_tokens.data.zero_()
elif isinstance(module, Dinov2WithRegistersLayerScale):
module.lambda1.data.fill_(self.config.layerscale_value)
|
class Dinov2WithRegistersPreTrainedModel(Dinov2PreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
'''Initialize the weights'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
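A sketch of the float32 round-trip that _init_weights above uses for trunc_normal_ initialization, assuming a half-precision layer; the layer and the initializer_range value here are placeholders.
```python
import torch
from torch import nn

# A half-precision layer, standing in for any nn.Linear / nn.Conv2d the model initializes.
layer = nn.Linear(4, 4).to(torch.float16)
initializer_range = 0.02  # matches the config default shown elsewhere in this dump

# Same pattern as _init_weights above: sample in float32, then cast back to the original dtype,
# so low-precision parameters are still drawn from the intended truncated normal.
layer.weight.data = nn.init.trunc_normal_(
    layer.weight.data.to(torch.float32), mean=0.0, std=initializer_range
).to(layer.weight.dtype)
if layer.bias is not None:
    layer.bias.data.zero_()
print(layer.weight.dtype)  # torch.float16
```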
2,191
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/configuration_distilbert.py
|
transformers.models.distilbert.configuration_distilbert.DistilBertConfig
|
from ...configuration_utils import PretrainedConfig
class DistilBertConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`DistilBertModel`] or a [`TFDistilBertModel`]. It
is used to instantiate a DistilBERT model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the DistilBERT
[distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the DistilBERT model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`DistilBertModel`] or [`TFDistilBertModel`].
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
sinusoidal_pos_embds (`boolean`, *optional*, defaults to `False`):
Whether to use sinusoidal positional embeddings.
n_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
n_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
dim (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
hidden_dim (`int`, *optional*, defaults to 3072):
The size of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
qa_dropout (`float`, *optional*, defaults to 0.1):
The dropout probabilities used in the question answering model [`DistilBertForQuestionAnswering`].
seq_classif_dropout (`float`, *optional*, defaults to 0.2):
The dropout probabilities used in the sequence classification and the multiple choice model
[`DistilBertForSequenceClassification`].
Examples:
```python
>>> from transformers import DistilBertConfig, DistilBertModel
>>> # Initializing a DistilBERT configuration
>>> configuration = DistilBertConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = DistilBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'distilbert'
attribute_map = {'hidden_size': 'dim', 'num_attention_heads': 'n_heads', 'num_hidden_layers': 'n_layers'}
def __init__(self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation='gelu', initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.sinusoidal_pos_embds = sinusoidal_pos_embds
self.n_layers = n_layers
self.n_heads = n_heads
self.dim = dim
self.hidden_dim = hidden_dim
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation = activation
self.initializer_range = initializer_range
self.qa_dropout = qa_dropout
self.seq_classif_dropout = seq_classif_dropout
super().__init__(**kwargs, pad_token_id=pad_token_id)
|
class DistilBertConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`DistilBertModel`] or a [`TFDistilBertModel`]. It
is used to instantiate a DistilBERT model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the DistilBERT
[distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the DistilBERT model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`DistilBertModel`] or [`TFDistilBertModel`].
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
sinusoidal_pos_embds (`boolean`, *optional*, defaults to `False`):
Whether to use sinusoidal positional embeddings.
n_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
n_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
dim (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
hidden_dim (`int`, *optional*, defaults to 3072):
The size of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
qa_dropout (`float`, *optional*, defaults to 0.1):
The dropout probabilities used in the question answering model [`DistilBertForQuestionAnswering`].
seq_classif_dropout (`float`, *optional*, defaults to 0.2):
The dropout probabilities used in the sequence classification and the multiple choice model
[`DistilBertForSequenceClassification`].
Examples:
```python
>>> from transformers import DistilBertConfig, DistilBertModel
>>> # Initializing a DistilBERT configuration
>>> configuration = DistilBertConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = DistilBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation='gelu', initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs):
pass
| 2
| 1
| 32
| 0
| 32
| 0
| 1
| 1.23
| 1
| 1
| 0
| 0
| 1
| 13
| 1
| 1
| 96
| 9
| 39
| 34
| 20
| 48
| 18
| 17
| 16
| 1
| 1
| 0
| 1
|
2,192
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/configuration_distilbert.py
|
transformers.models.distilbert.configuration_distilbert.DistilBertOnnxConfig
|
from ...onnx import OnnxConfig
from collections import OrderedDict
from collections.abc import Mapping
class DistilBertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'multiple-choice':
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
|
class DistilBertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
| 3
| 0
| 11
| 0
| 11
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 1
| 0
| 1
| 1
| 13
| 0
| 13
| 4
| 10
| 0
| 6
| 3
| 4
| 2
| 1
| 1
| 2
|
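The dynamic-axes mapping returned by DistilBertOnnxConfig.inputs above, restated as a standalone sketch so the two task branches are easy to compare; the task strings used for the calls are illustrative.
```python
from collections import OrderedDict

def dynamic_axes_for(task: str) -> OrderedDict:
    # Mirrors the branching in DistilBertOnnxConfig.inputs above.
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

print(dynamic_axes_for("sequence-classification"))
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'})])
print(dynamic_axes_for("multiple-choice"))
# OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'choice', 2: 'sequence'})])
```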
2,193
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
|
transformers.models.distilbert.modeling_distilbert.DistilBertFlashAttention2
|
from typing import Optional, Union
import torch
from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available
class DistilBertFlashAttention2(MultiHeadSelfAttention):
"""
DistilBert flash attention module. This module inherits from `MultiHeadSelfAttention` as the weights of the module
stay untouched. The only required change would be on the forward pass where it needs to correctly call the public
API of flash attention and deal with padding tokens in case the input contains any of them.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, ...]:
"""
Parameters:
query: torch.tensor(bs, seq_length, dim)
key: torch.tensor(bs, seq_length, dim)
value: torch.tensor(bs, seq_length, dim)
mask: torch.tensor(bs, seq_length)
Returns:
weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights
context: torch.tensor(bs, seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
"""
batch_size, q_length, dim = query.size()
dim_per_head = self.dim // self.n_heads
def reshape(x: torch.Tensor) -> torch.Tensor:
"""separate heads"""
return x.view(batch_size, -1, self.n_heads, dim_per_head)
query_states = reshape(self.q_lin(query))
key_states = reshape(self.k_lin(key))
value_states = reshape(self.v_lin(value))
attn_dropout = self.config.attention_dropout if self.training else 0.0
device_type = query_states.device.type if query_states.device.type != 'mps' else 'cpu'
if query_states.dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_dtype(device_type) if hasattr(torch, 'get_autocast_dtype') else torch.get_autocast_gpu_dtype()
elif hasattr(self.config, '_pre_quantization_dtype'):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_lin.weight.dtype
logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.')
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
attn_weights = _flash_attention_forward(query_states, key_states, value_states, mask, q_length, dropout=attn_dropout, use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal)
attn_weights_reshaped = attn_weights.reshape(batch_size, q_length, self.n_heads * dim_per_head)
attn_output = self.out_lin(attn_weights_reshaped)
if output_attentions:
return (attn_output, attn_weights)
else:
return (attn_output,)
|
class DistilBertFlashAttention2(MultiHeadSelfAttention):
'''
DistilBert flash attention module. This module inherits from `MultiHeadSelfAttention` as the weights of the module
stay untouched. The only required change would be on the forward pass where it needs to correctly call the public
API of flash attention and deal with padding tokens in case the input contains any of them.
'''
def __init__(self, *args, **kwargs):
pass
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, ...]:
'''
Parameters:
query: torch.tensor(bs, seq_length, dim)
key: torch.tensor(bs, seq_length, dim)
value: torch.tensor(bs, seq_length, dim)
mask: torch.tensor(bs, seq_length)
Returns:
weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights
context: torch.tensor(bs, seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
'''
pass
def reshape(x: torch.Tensor) -> torch.Tensor:
'''separate heads'''
pass
| 4
| 3
| 30
| 4
| 18
| 8
| 3
| 0.52
| 1
| 3
| 0
| 0
| 2
| 1
| 2
| 15
| 94
| 15
| 52
| 23
| 40
| 27
| 28
| 15
| 24
| 6
| 2
| 2
| 8
|
2,194
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
|
transformers.models.distilbert.modeling_distilbert.DistilBertForMaskedLM
|
from ...utils import auto_docstring, logging
from ...activations import get_activation
from ...configuration_utils import PretrainedConfig
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
import torch
from typing import Optional, Union
@auto_docstring(custom_intro='\n DistilBert Model with a `masked language modeling` head on top.\n ')
class DistilBertForMaskedLM(DistilBertPreTrainedModel):
_tied_weights_keys = ['vocab_projector.weight']
def __init__(self, config: PretrainedConfig):
super().__init__(config)
self.activation = get_activation(config.activation)
self.distilbert = DistilBertModel(config)
self.vocab_transform = nn.Linear(config.dim, config.dim)
self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
self.vocab_projector = nn.Linear(config.dim, config.vocab_size)
self.post_init()
self.mlm_loss_fct = nn.CrossEntropyLoss()
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings
"""
return self.distilbert.get_position_embeddings()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings in the position embedding matrix. If position embeddings are learned, increasing the size
will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
size will add correct vectors at the end following the position encoding algorithm, whereas reducing
the size will remove vectors from the end.
"""
self.distilbert.resize_position_embeddings(new_num_position_embeddings)
def get_output_embeddings(self) -> nn.Module:
return self.vocab_projector
def set_output_embeddings(self, new_embeddings: nn.Module):
self.vocab_projector = new_embeddings
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MaskedLMOutput, tuple[torch.Tensor, ...]]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
dlbrt_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = dlbrt_output[0]
prediction_logits = self.vocab_transform(hidden_states)
prediction_logits = self.activation(prediction_logits)
prediction_logits = self.vocab_layer_norm(prediction_logits)
prediction_logits = self.vocab_projector(prediction_logits)
mlm_loss = None
if labels is not None:
mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))
if not return_dict:
output = (prediction_logits,) + dlbrt_output[1:]
return (mlm_loss,) + output if mlm_loss is not None else output
return MaskedLMOutput(loss=mlm_loss, logits=prediction_logits, hidden_states=dlbrt_output.hidden_states, attentions=dlbrt_output.attentions)
|
@auto_docstring(custom_intro='\n DistilBert Model with a `masked language modeling` head on top.\n ')
class DistilBertForMaskedLM(DistilBertPreTrainedModel):
def __init__(self, config: PretrainedConfig):
pass
def get_position_embeddings(self) -> nn.Embedding:
'''
Returns the position embeddings
'''
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embedding matrix. If position embeddings are learned, increasing the size
will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
size will add correct vectors at the end following the position encoding algorithm, whereas reducing
the size will remove vectors from the end.
'''
pass
def get_output_embeddings(self) -> nn.Module:
pass
def set_output_embeddings(self, new_embeddings: nn.Module):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MaskedLMOutput, tuple[torch.Tensor, ...]]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
'''
pass
| 9
| 3
| 14
| 2
| 9
| 4
| 2
| 0.4
| 1
| 6
| 2
| 0
| 6
| 6
| 6
| 7
| 98
| 15
| 63
| 30
| 40
| 25
| 34
| 19
| 27
| 5
| 2
| 1
| 10
|
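How the flattened cross-entropy in DistilBertForMaskedLM.forward above treats masked labels can be checked with a tiny sketch; the vocabulary size and label values are made up, and the -100 positions are skipped because that is CrossEntropyLoss's default ignore_index.
```python
import torch
from torch import nn

# Hypothetical shapes: batch 2, sequence length 3, vocabulary of 10 tokens.
prediction_logits = torch.randn(2, 3, 10)
labels = torch.tensor([[5, -100, 2],
                       [-100, 7, -100]])  # -100 positions are ignored, as in the forward above

mlm_loss_fct = nn.CrossEntropyLoss()  # ignore_index defaults to -100
mlm_loss = mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))
print(mlm_loss)  # averaged only over the three labelled positions
```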
2,195
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
|
transformers.models.distilbert.modeling_distilbert.DistilBertForMultipleChoice
|
from typing import Optional, Union
from torch import nn
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...configuration_utils import PretrainedConfig
@auto_docstring
class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
def __init__(self, config: PretrainedConfig):
super().__init__(config)
self.distilbert = DistilBertModel(config)
self.pre_classifier = nn.Linear(config.dim, config.dim)
self.classifier = nn.Linear(config.dim, 1)
self.dropout = nn.Dropout(config.seq_classif_dropout)
self.post_init()
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings
"""
return self.distilbert.get_position_embeddings()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`)
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.distilbert.resize_position_embeddings(new_num_position_embeddings)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MultipleChoiceModelOutput, tuple[torch.Tensor, ...]]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
Examples:
```python
>>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
>>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
>>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
>>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
outputs = self.distilbert(input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_state = outputs[0]
pooled_output = hidden_state[:, 0]
pooled_output = self.pre_classifier(pooled_output)
pooled_output = nn.ReLU()(pooled_output)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
def __init__(self, config: PretrainedConfig):
pass
def get_position_embeddings(self) -> nn.Embedding:
'''
Returns the position embeddings
'''
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`)
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[MultipleChoiceModelOutput, tuple[torch.Tensor, ...]]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
Examples:
```python
>>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
>>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
>>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
>>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> loss = outputs.loss
>>> logits = outputs.logits
```'''
pass
| 7
| 3
| 28
| 4
| 15
| 11
| 3
| 0.66
| 1
| 6
| 2
| 0
| 4
| 4
| 4
| 5
| 121
| 20
| 65
| 31
| 46
| 43
| 34
| 18
| 29
| 9
| 2
| 1
| 12
|
2,196
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
|
transformers.models.distilbert.modeling_distilbert.DistilBertForQuestionAnswering
|
from torch import nn
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import auto_docstring, logging
from typing import Optional, Union
from ...configuration_utils import PretrainedConfig
import torch
@auto_docstring
class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
def __init__(self, config: PretrainedConfig):
super().__init__(config)
self.distilbert = DistilBertModel(config)
self.qa_outputs = nn.Linear(config.dim, config.num_labels)
if config.num_labels != 2:
raise ValueError(f'config.num_labels should be 2, but it is {config.num_labels}')
self.dropout = nn.Dropout(config.qa_dropout)
self.post_init()
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings
"""
return self.distilbert.get_position_embeddings()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size
will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
size will add correct vectors at the end following the position encoding algorithm, whereas reducing
the size will remove vectors from the end.
"""
self.distilbert.resize_position_embeddings(new_num_position_embeddings)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[QuestionAnsweringModelOutput, tuple[torch.Tensor, ...]]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
distilbert_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = distilbert_output[0]
hidden_states = self.dropout(hidden_states)
logits = self.qa_outputs(hidden_states)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + distilbert_output[1:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions)
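# --- Illustrative inference sketch (editor addition, not part of the original module) ---
# Shows how the start/end logits produced above are turned into an answer span:
# take the argmax of each and decode the tokens in between. The checkpoint name
# "distilbert-base-cased-distilled-squad" is an assumption (a public SQuAD-tuned model);
# any DistilBERT question-answering checkpoint works the same way.
import torch
from transformers import AutoTokenizer

def _extract_answer_sketch(question: str, context: str) -> str:
    tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased-distilled-squad")
    model = DistilBertForQuestionAnswering.from_pretrained("distilbert-base-cased-distilled-squad")
    inputs = tokenizer(question, context, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    start = outputs.start_logits.argmax(dim=-1).item()
    end = outputs.end_logits.argmax(dim=-1).item()
    # decode the tokens between the predicted start and end indices (inclusive)
    return tokenizer.decode(inputs["input_ids"][0, start : end + 1])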
|
@auto_docstring
class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
def __init__(self, config: PretrainedConfig):
pass
def get_position_embeddings(self) -> nn.Embedding:
'''
Returns the position embeddings
'''
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size
will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
size will add correct vectors at the end following the position encoding algorithm, whereas reducing
the size will remove vectors from the end.
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[QuestionAnsweringModelOutput, tuple[torch.Tensor, ...]]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
'''
pass
| 7
| 3
| 25
| 3
| 16
| 8
| 3
| 0.44
| 1
| 7
| 2
| 0
| 4
| 3
| 4
| 5
| 109
| 13
| 70
| 30
| 48
| 31
| 39
| 18
| 34
| 7
| 2
| 2
| 11
|
2,197
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
|
transformers.models.distilbert.modeling_distilbert.DistilBertForSequenceClassification
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
from ...utils import auto_docstring, logging
from typing import Optional, Union
from torch import nn
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...configuration_utils import PretrainedConfig
@auto_docstring(custom_intro='\n DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
def __init__(self, config: PretrainedConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.distilbert = DistilBertModel(config)
self.pre_classifier = nn.Linear(config.dim, config.dim)
self.classifier = nn.Linear(config.dim, config.num_labels)
self.dropout = nn.Dropout(config.seq_classif_dropout)
self.post_init()
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings
"""
return self.distilbert.get_position_embeddings()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size
will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
size will add correct vectors at the end following the position encoding algorithm, whereas reducing
the size will remove vectors from the end.
"""
self.distilbert.resize_position_embeddings(new_num_position_embeddings)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[SequenceClassifierOutput, tuple[torch.Tensor, ...]]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
distilbert_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_state = distilbert_output[0]
pooled_output = hidden_state[:, 0]
pooled_output = self.pre_classifier(pooled_output)
pooled_output = nn.ReLU()(pooled_output)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + distilbert_output[1:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=distilbert_output.hidden_states, attentions=distilbert_output.attentions)
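# --- Illustrative sketch (editor addition, not part of the original module) ---
# Condenses the `problem_type` selection implemented in forward() above:
# num_labels == 1 -> regression (MSELoss); integer labels with num_labels > 1 ->
# single-label classification (CrossEntropyLoss); otherwise multi-label (BCEWithLogitsLoss).
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def _pick_loss_sketch(num_labels: int, labels: torch.Tensor) -> torch.nn.Module:
    if num_labels == 1:
        return MSELoss()            # regression
    if labels.dtype in (torch.long, torch.int):
        return CrossEntropyLoss()   # single-label classification
    return BCEWithLogitsLoss()      # multi-label classification

assert isinstance(_pick_loss_sketch(1, torch.tensor([0.5])), MSELoss)
assert isinstance(_pick_loss_sketch(3, torch.tensor([2])), CrossEntropyLoss)
assert isinstance(_pick_loss_sketch(3, torch.tensor([[1.0, 0.0, 1.0]])), BCEWithLogitsLoss)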
|
@auto_docstring(custom_intro='\n DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
def __init__(self, config: PretrainedConfig):
pass
def get_position_embeddings(self) -> nn.Embedding:
'''
Returns the position embeddings
'''
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size
will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
size will add correct vectors at the end following the position encoding algorithm, whereas reducing
the size will remove vectors from the end.
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[SequenceClassifierOutput, tuple[torch.Tensor, ...]]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 7
| 3
| 25
| 2
| 18
| 7
| 4
| 0.34
| 1
| 6
| 2
| 0
| 4
| 6
| 4
| 5
| 108
| 11
| 77
| 29
| 56
| 26
| 43
| 18
| 38
| 12
| 2
| 3
| 15
|
2,198
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
|
transformers.models.distilbert.modeling_distilbert.DistilBertForTokenClassification
|
from ...utils import auto_docstring, logging
from ...configuration_utils import PretrainedConfig
from typing import Optional, Union
import torch
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@auto_docstring
class DistilBertForTokenClassification(DistilBertPreTrainedModel):
def __init__(self, config: PretrainedConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.distilbert = DistilBertModel(config)
self.dropout = nn.Dropout(config.dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings
"""
return self.distilbert.get_position_embeddings()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size
will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
size will add correct vectors at the end following the position encoding algorithm, whereas reducing
the size will remove vectors from the end.
"""
self.distilbert.resize_position_embeddings(new_num_position_embeddings)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[TokenClassifierOutput, tuple[torch.Tensor, ...]]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.distilbert(input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
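# --- Illustrative inference sketch (editor addition, not part of the original module) ---
# The head above emits one logit vector per token; mapping the per-token argmax through
# `config.id2label` yields a tag for every (sub)token. The NER checkpoint name below is an
# assumption; any DistilBERT token-classification checkpoint behaves the same way.
import torch
from transformers import AutoTokenizer

def _tag_tokens_sketch(text: str, checkpoint: str = "elastic/distilbert-base-cased-finetuned-conll03-english"):
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = DistilBertForTokenClassification.from_pretrained(checkpoint)
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits              # (1, seq_len, num_labels)
    predictions = logits.argmax(dim=-1)[0]
    tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
    return [(token, model.config.id2label[idx.item()]) for token, idx in zip(tokens, predictions)]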
|
@auto_docstring
class DistilBertForTokenClassification(DistilBertPreTrainedModel):
def __init__(self, config: PretrainedConfig):
pass
def get_position_embeddings(self) -> nn.Embedding:
'''
Returns the position embeddings
'''
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size
will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
size will add correct vectors at the end following the position encoding algorithm, whereas reducing
the size will remove vectors from the end.
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[TokenClassifierOutput, tuple[torch.Tensor, ...]]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass
| 7
| 3
| 19
| 2
| 12
| 5
| 2
| 0.33
| 1
| 6
| 2
| 0
| 4
| 4
| 4
| 5
| 85
| 12
| 55
| 26
| 34
| 18
| 26
| 15
| 21
| 5
| 2
| 1
| 8
|
2,199
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
|
transformers.models.distilbert.modeling_distilbert.DistilBertModel
|
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa
import torch
from ...utils import auto_docstring, logging
from ...configuration_utils import PretrainedConfig
from torch import nn
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
@auto_docstring
class DistilBertModel(DistilBertPreTrainedModel):
def __init__(self, config: PretrainedConfig):
super().__init__(config)
self.embeddings = Embeddings(config)
self.transformer = Transformer(config)
self.post_init()
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings
"""
return self.embeddings.position_embeddings
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size
will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
size will add correct vectors at the end following the position encoding algorithm, whereas reducing
the size will remove vectors from the end.
"""
num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings
if num_position_embeds_diff == 0:
return
logger.info(f'Setting `config.max_position_embeddings={new_num_position_embeddings}`...')
self.config.max_position_embeddings = new_num_position_embeddings
old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()
self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)
if self.config.sinusoidal_pos_embds:
create_sinusoidal_embeddings(n_pos=self.config.max_position_embeddings, dim=self.config.dim, out=self.embeddings.position_embeddings.weight)
else:
with torch.no_grad():
if num_position_embeds_diff > 0:
self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(old_position_embeddings_weight)
else:
self.embeddings.position_embeddings.weight = nn.Parameter(old_position_embeddings_weight[:num_position_embeds_diff])
self.embeddings.position_embeddings.to(self.device)
def get_input_embeddings(self) -> nn.Embedding:
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings: nn.Embedding):
self.embeddings.word_embeddings = new_embeddings
def _prune_heads(self, heads_to_prune: dict[int, list[list[int]]]):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.transformer.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[BaseModelOutput, tuple[torch.Tensor, ...]]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
device = input_ids.device if input_ids is not None else inputs_embeds.device
head_mask_is_none = head_mask is None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embeddings = self.embeddings(input_ids, inputs_embeds)
if self.config._attn_implementation == 'flash_attention_2':
attention_mask = attention_mask if attention_mask is not None and 0 in attention_mask else None
else:
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if self.config._attn_implementation == 'sdpa' and head_mask_is_none and (not output_attentions):
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, embeddings.dtype, tgt_len=input_shape[1])
return self.transformer(x=embeddings, attn_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
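# --- Illustrative sketch (editor addition, not part of the original module) ---
# Demonstrates resize_position_embeddings() above for learned position embeddings:
# growing keeps the trained rows and appends freshly initialized ones; shrinking
# truncates from the end. Uses a randomly initialized model, so no checkpoint is assumed.
import torch
from transformers import DistilBertConfig

def _resize_position_embeddings_sketch():
    config = DistilBertConfig(max_position_embeddings=512, sinusoidal_pos_embds=False)
    model = DistilBertModel(config)
    old_weight = model.get_position_embeddings().weight.detach().clone()
    model.resize_position_embeddings(1024)   # grow: the 512 existing rows are preserved
    assert model.get_position_embeddings().num_embeddings == 1024
    assert torch.equal(model.get_position_embeddings().weight[:512].detach(), old_weight)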
|
@auto_docstring
class DistilBertModel(DistilBertPreTrainedModel):
def __init__(self, config: PretrainedConfig):
pass
def get_position_embeddings(self) -> nn.Embedding:
'''
Returns the position embeddings
'''
pass
def resize_position_embeddings(self, new_num_position_embeddings: int):
'''
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size
will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
size will add correct vectors at the end following the position encoding algorithm, whereas reducing
the size will remove vectors from the end.
'''
pass
def get_input_embeddings(self) -> nn.Embedding:
pass
def set_input_embeddings(self, new_embeddings: nn.Embedding):
pass
def _prune_heads(self, heads_to_prune: dict[int, list[list[int]]]):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[BaseModelOutput, tuple[torch.Tensor, ...]]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
'''
pass
| 10
| 4
| 17
| 2
| 12
| 4
| 3
| 0.27
| 1
| 8
| 3
| 0
| 7
| 4
| 7
| 8
| 133
| 21
| 91
| 29
| 68
| 25
| 53
| 19
| 45
| 12
| 2
| 3
| 22
|