Dataset schema. Each row holds one Python class extracted from a repository: its full source (`human_written_code`), a stubbed-out skeleton (`class_skeleton`), and a set of static code metrics. String columns report length ranges; `class_skeleton` may be null (⌀).

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length, nullable) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |

---

- **id:** 2,900
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/modeling_grounding_dino.py`
- **class_name:** `transformers.models.grounding_dino.modeling_grounding_dino.GroundingDinoLearnedPositionEmbedding`

**human_written_code:**

```python
from torch import Tensor, nn
import torch
import torch.nn.functional as F
class GroundingDinoLearnedPositionEmbedding(nn.Module):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, config):
super().__init__()
embedding_dim = config.d_model // 2
self.row_embeddings = nn.Embedding(50, embedding_dim)
self.column_embeddings = nn.Embedding(50, embedding_dim)
def forward(self, pixel_values, pixel_mask=None):
height, width = pixel_values.shape[-2:]
width_values = torch.arange(width, device=pixel_values.device)
height_values = torch.arange(height, device=pixel_values.device)
x_emb = self.column_embeddings(width_values)
y_emb = self.row_embeddings(height_values)
pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1)
pos = pos.permute(2, 0, 1)
pos = pos.unsqueeze(0)
pos = pos.repeat(pixel_values.shape[0], 1, 1, 1)
return pos
```

**class_skeleton:**

```python
class GroundingDinoLearnedPositionEmbedding(nn.Module):
'''
This module learns positional embeddings up to a fixed maximum size.
'''
def __init__(self, config):
pass
def forward(self, pixel_values, pixel_mask=None):
pass
```

**metrics:**

| metric | value |
|---|---|
| total_program_units | 3 |
| total_doc_str | 1 |
| AvgCountLine | 9 |
| AvgCountLineBlank | 1 |
| AvgCountLineCode | 8 |
| AvgCountLineComment | 0 |
| AvgCyclomatic | 1 |
| CommentToCodeRatio | 0.18 |
| CountClassBase | 1 |
| CountClassCoupled | 1 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 2 |
| CountDeclInstanceVariable | 2 |
| CountDeclMethod | 2 |
| CountDeclMethodAll | 12 |
| CountLine | 23 |
| CountLineBlank | 3 |
| CountLineCode | 17 |
| CountLineCodeDecl | 12 |
| CountLineCodeExe | 14 |
| CountLineComment | 3 |
| CountStmt | 17 |
| CountStmtDecl | 12 |
| CountStmtExe | 14 |
| MaxCyclomatic | 1 |
| MaxInheritanceTree | 1 |
| MaxNesting | 0 |
| SumCyclomatic | 2 |
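To make the row concrete, a minimal usage sketch; it assumes the class definition above is in scope, and the `SimpleNamespace` config stub (only `d_model` is read) is a hypothetical stand-in for `GroundingDinoConfig`:

```python
import torch
from types import SimpleNamespace

# Hypothetical config stub; the module only reads config.d_model.
config = SimpleNamespace(d_model=256)
embed = GroundingDinoLearnedPositionEmbedding(config)

# Height and width must stay within 50, the fixed size of the learned embedding tables.
pixel_values = torch.randn(2, 3, 32, 32)  # (batch, channels, height, width)
pos = embed(pixel_values)
print(pos.shape)  # torch.Size([2, 256, 32, 32])
```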

---

- **id:** 2,901
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/modeling_grounding_dino.py`
- **class_name:** `transformers.models.grounding_dino.modeling_grounding_dino.GroundingDinoMLPPredictionHead`

**human_written_code:**

```python
from torch import Tensor, nn
class GroundingDinoMLPPredictionHead(nn.Module):
"""
Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
height and width of a bounding box w.r.t. an image.
Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList((nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
```

**class_skeleton:**

```python
class GroundingDinoMLPPredictionHead(nn.Module):
'''
Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
height and width of a bounding box w.r.t. an image.
Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
'''
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
pass
def forward(self, x):
pass
```

**metrics:**

| metric | value |
|---|---|
| total_program_units | 3 |
| total_doc_str | 1 |
| AvgCountLine | 5 |
| AvgCountLineBlank | 0 |
| AvgCountLineCode | 5 |
| AvgCountLineComment | 0 |
| AvgCyclomatic | 2 |
| CommentToCodeRatio | 0.5 |
| CountClassBase | 1 |
| CountClassCoupled | 3 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 2 |
| CountDeclInstanceVariable | 2 |
| CountDeclMethod | 2 |
| CountDeclMethodAll | 12 |
| CountLine | 19 |
| CountLineBlank | 4 |
| CountLineCode | 10 |
| CountLineCodeDecl | 7 |
| CountLineCodeExe | 7 |
| CountLineComment | 5 |
| CountStmt | 10 |
| CountStmtDecl | 7 |
| CountStmtExe | 7 |
| MaxCyclomatic | 3 |
| MaxInheritanceTree | 1 |
| MaxNesting | 1 |
| SumCyclomatic | 4 |
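A similar sketch for the MLP head; the dimensions (256-d inputs, 4 box coordinates) are illustrative assumptions, not values taken from the dataset:

```python
import torch

# Three Linear layers, 256 -> 256 -> 256 -> 4, with ReLU after all but the last.
head = GroundingDinoMLPPredictionHead(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
queries = torch.randn(2, 900, 256)  # (batch, num_queries, hidden)
boxes = head(queries)
print(boxes.shape)  # torch.Size([2, 900, 4])
```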

---

- **id:** 2,902
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/modeling_grounding_dino.py`
- **class_name:** `transformers.models.grounding_dino.modeling_grounding_dino.GroundingDinoModel`

**human_written_code:**

````python
import torch
from ...utils import auto_docstring, logging
from typing import Optional, Union
from ...pytorch_utils import meshgrid
from .configuration_grounding_dino import GroundingDinoConfig
from torch import Tensor, nn
from ..auto import AutoModel
import torch.nn.functional as F
@auto_docstring(custom_intro='\n The bare Grounding DINO Model (consisting of a backbone and encoder-decoder Transformer) outputting raw\n hidden-states without any specific head on top.\n ')
class GroundingDinoModel(GroundingDinoPreTrainedModel):
def __init__(self, config: GroundingDinoConfig):
super().__init__(config)
backbone = GroundingDinoConvEncoder(config)
position_embeddings = build_position_encoding(config)
self.backbone = GroundingDinoConvModel(backbone, position_embeddings)
if config.num_feature_levels > 1:
num_backbone_outs = len(backbone.intermediate_channel_sizes)
input_proj_list = []
for i in range(num_backbone_outs):
in_channels = backbone.intermediate_channel_sizes[i]
input_proj_list.append(nn.Sequential(nn.Conv2d(in_channels, config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model)))
for _ in range(config.num_feature_levels - num_backbone_outs):
input_proj_list.append(nn.Sequential(nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, config.d_model)))
in_channels = config.d_model
self.input_proj_vision = nn.ModuleList(input_proj_list)
else:
self.input_proj_vision = nn.ModuleList([nn.Sequential(nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model))])
self.text_backbone = AutoModel.from_config(config.text_config, add_pooling_layer=False)
self.text_projection = nn.Linear(config.text_config.hidden_size, config.d_model)
if config.embedding_init_target or not config.two_stage:
self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model)
self.encoder = GroundingDinoEncoder(config)
self.decoder = GroundingDinoDecoder(config)
self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model))
if config.two_stage:
self.enc_output = nn.Linear(config.d_model, config.d_model)
self.enc_output_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps)
if config.two_stage_bbox_embed_share and config.decoder_bbox_embed_share and (self.decoder.bbox_embed is not None):
self.encoder_output_bbox_embed = self.decoder.bbox_embed
else:
self.encoder_output_bbox_embed = GroundingDinoMLPPredictionHead(input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3)
self.encoder_output_class_embed = GroundingDinoContrastiveEmbedding(config)
else:
self.reference_points = nn.Embedding(config.num_queries, 4)
self.post_init()
def freeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(False)
def unfreeze_backbone(self):
for name, param in self.backbone.conv_encoder.model.named_parameters():
param.requires_grad_(True)
def get_valid_ratio(self, mask):
"""Get the valid ratio of all feature maps."""
_, height, width = mask.shape
valid_height = torch.sum(mask[:, :, 0], 1)
valid_width = torch.sum(mask[:, 0, :], 1)
valid_ratio_height = valid_height.float() / height
valid_ratio_width = valid_width.float() / width
valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
return valid_ratio
def generate_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
"""Generate the encoder output proposals from encoded enc_output.
Args:
enc_output (`torch.Tensor[batch_size, sequence_length, hidden_size]`): Output of the encoder.
padding_mask (`torch.Tensor[batch_size, sequence_length]`): Padding mask for `enc_output`.
spatial_shapes (`torch.Tensor[num_feature_levels, 2]`): Spatial shapes of the feature maps.
Returns:
`tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.
- object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to
directly predict a bounding box. (without the need of a decoder)
- output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse
sigmoid.
"""
batch_size = enc_output.shape[0]
proposals = []
current_position = 0
for level, (height, width) in enumerate(spatial_shapes):
mask_flatten_ = padding_mask[:, current_position:current_position + height * width]
mask_flatten_ = mask_flatten_.view(batch_size, height, width, 1)
valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
grid_y, grid_x = meshgrid(torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), indexing='ij')
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale
width_height = torch.ones_like(grid) * 0.05 * 2.0 ** level
proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)
proposals.append(proposal)
current_position += height * width
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
output_proposals = torch.log(output_proposals / (1 - output_proposals))
output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float('inf'))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))
object_query = enc_output
object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))
object_query = object_query.masked_fill(~output_proposals_valid, float(0))
object_query = self.enc_output_norm(self.enc_output(object_query))
return (object_query, output_proposals)
@auto_docstring
def forward(self, pixel_values: Tensor, input_ids: Tensor, token_type_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, pixel_mask: Optional[Tensor]=None, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None):
"""
input_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`BertTokenizer.__call__`] for details.
token_type_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`: 0 corresponds to a `sentence A` token, 1 corresponds to a `sentence B` token
[What are token type IDs?](../glossary#token-type-ids)
Examples:
```python
>>> from transformers import AutoProcessor, AutoModel
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "a cat."
>>> processor = AutoProcessor.from_pretrained("IDEA-Research/grounding-dino-tiny")
>>> model = AutoModel.from_pretrained("IDEA-Research/grounding-dino-tiny")
>>> inputs = processor(images=image, text=text, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 900, 256]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
text_self_attention_masks, position_ids = generate_masks_with_special_tokens_and_transfer_map(input_ids)
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
text_token_mask = attention_mask.bool()
max_text_len = self.config.max_text_len
if text_self_attention_masks.shape[1] > max_text_len:
text_self_attention_masks = text_self_attention_masks[:, :max_text_len, :max_text_len]
position_ids = position_ids[:, :max_text_len]
input_ids = input_ids[:, :max_text_len]
token_type_ids = token_type_ids[:, :max_text_len]
text_token_mask = text_token_mask[:, :max_text_len]
text_outputs = self.text_backbone(input_ids, text_self_attention_masks, token_type_ids, position_ids, return_dict=return_dict)
text_features = text_outputs.last_hidden_state if return_dict else text_outputs[0]
text_features = self.text_projection(text_features)
batch_size, num_channels, height, width = pixel_values.shape
device = pixel_values.device
if pixel_mask is None:
pixel_mask = torch.ones((batch_size, height, width), dtype=torch.long, device=device)
vision_features, position_embeddings_list = self.backbone(pixel_values, pixel_mask)
feature_maps = []
masks = []
for level, (source, mask) in enumerate(vision_features):
feature_maps.append(self.input_proj_vision[level](source))
masks.append(mask)
if self.config.num_feature_levels > len(feature_maps):
_len_sources = len(feature_maps)
for level in range(_len_sources, self.config.num_feature_levels):
if level == _len_sources:
source = self.input_proj_vision[level](vision_features[-1][0])
else:
source = self.input_proj_vision[level](feature_maps[-1])
mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0]
pos_l = self.backbone.position_embedding(source, mask).to(source.dtype)
feature_maps.append(source)
masks.append(mask)
position_embeddings_list.append(pos_l)
query_embeds = None
if self.config.embedding_init_target or self.config.two_stage:
query_embeds = self.query_position_embeddings.weight
source_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes_list = []
for level, (source, mask, pos_embed) in enumerate(zip(feature_maps, masks, position_embeddings_list)):
batch_size, num_channels, height, width = source.shape
spatial_shape = (height, width)
spatial_shapes_list.append(spatial_shape)
source = source.flatten(2).transpose(1, 2)
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
source_flatten.append(source)
mask_flatten.append(mask)
source_flatten = torch.cat(source_flatten, 1)
mask_flatten = torch.cat(mask_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes_list, dtype=torch.long, device=source_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
valid_ratios = valid_ratios.float()
if encoder_outputs is None:
encoder_outputs = self.encoder(vision_features=source_flatten, vision_attention_mask=~mask_flatten, vision_position_embedding=lvl_pos_embed_flatten, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, valid_ratios=valid_ratios, text_features=text_features, text_attention_mask=~text_token_mask, text_position_embedding=None, text_self_attention_masks=~text_self_attention_masks, text_position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, GroundingDinoEncoderOutput)):
encoder_outputs = GroundingDinoEncoderOutput(last_hidden_state_vision=encoder_outputs[0], last_hidden_state_text=encoder_outputs[1], vision_hidden_states=encoder_outputs[2] if output_hidden_states else None, text_hidden_states=encoder_outputs[3] if output_hidden_states else None, attentions=encoder_outputs[-1] if output_attentions else None)
topk_proposals = None
enc_outputs_class = None
enc_outputs_coord_logits = None
encoder_logits = None
encoder_pred_boxes = None
if self.config.two_stage:
object_query_embedding, output_proposals = self.generate_encoder_output_proposals(encoder_outputs[0], ~mask_flatten, spatial_shapes)
enc_outputs_class = self.encoder_output_class_embed(object_query_embedding, encoder_outputs[1], text_token_mask)
delta_bbox = self.encoder_output_bbox_embed(object_query_embedding)
enc_outputs_coord_logits = delta_bbox + output_proposals
topk = self.config.num_queries
topk_logits = enc_outputs_class.max(-1)[0]
topk_proposals = torch.topk(topk_logits, topk, dim=1)[1]
topk_coords_logits = torch.gather(enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4))
topk_coords_logits = topk_coords_logits.detach()
reference_points = topk_coords_logits.sigmoid()
init_reference_points = reference_points
if query_embeds is not None:
target = query_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
else:
target = torch.gather(object_query_embedding, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)).detach()
encoder_pred_boxes = reference_points
encoder_logits = self.encoder_output_class_embed(target, text_features, text_token_mask)
else:
target = query_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
reference_points = self.reference_points.weight.unsqueeze(0).repeat(batch_size, 1, 1).sigmoid()
init_reference_points = reference_points
decoder_outputs = self.decoder(inputs_embeds=target, vision_encoder_hidden_states=encoder_outputs[0], vision_encoder_attention_mask=mask_flatten, text_encoder_hidden_states=encoder_outputs[1], text_encoder_attention_mask=~text_token_mask, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, valid_ratios=valid_ratios, self_attn_mask=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
if not return_dict:
enc_outputs = tuple((value for value in [enc_outputs_class, enc_outputs_coord_logits, encoder_logits, encoder_pred_boxes] if value is not None))
tuple_outputs = (decoder_outputs[0], init_reference_points) + decoder_outputs[1:] + encoder_outputs + enc_outputs
return tuple_outputs
return GroundingDinoModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, init_reference_points=init_reference_points, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, intermediate_reference_points=decoder_outputs.intermediate_reference_points, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, encoder_last_hidden_state_vision=encoder_outputs.last_hidden_state_vision, encoder_last_hidden_state_text=encoder_outputs.last_hidden_state_text, encoder_vision_hidden_states=encoder_outputs.vision_hidden_states, encoder_text_hidden_states=encoder_outputs.text_hidden_states, encoder_attentions=encoder_outputs.attentions, enc_outputs_class=enc_outputs_class, enc_outputs_coord_logits=enc_outputs_coord_logits, encoder_logits=encoder_logits, encoder_pred_boxes=encoder_pred_boxes)
````

**class_skeleton:**

````python
@auto_docstring(custom_intro='\n The bare Grounding DINO Model (consisting of a backbone and encoder-decoder Transformer) outputting raw\n hidden-states without any specific head on top.\n ')
class GroundingDinoModel(GroundingDinoPreTrainedModel):
def __init__(self, config: GroundingDinoConfig):
pass
def freeze_backbone(self):
pass
def unfreeze_backbone(self):
pass
def get_valid_ratio(self, mask):
'''Get the valid ratio of all feature maps.'''
pass
def generate_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
'''Generate the encoder output proposals from encoded enc_output.
Args:
enc_output (`torch.Tensor[batch_size, sequence_length, hidden_size]`): Output of the encoder.
padding_mask (`torch.Tensor[batch_size, sequence_length]`): Padding mask for `enc_output`.
spatial_shapes (`torch.Tensor[num_feature_levels, 2]`): Spatial shapes of the feature maps.
Returns:
`tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.
- object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to
directly predict a bounding box. (without the need of a decoder)
- output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse
sigmoid.
'''
pass
@auto_docstring
def forward(self, pixel_values: Tensor, input_ids: Tensor, token_type_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, pixel_mask: Optional[Tensor]=None, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None):
'''
input_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`BertTokenizer.__call__`] for details.
token_type_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`: 0 corresponds to a `sentence A` token, 1 corresponds to a `sentence B` token
[What are token type IDs?](../glossary#token-type-ids)
Examples:
```python
>>> from transformers import AutoProcessor, AutoModel
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "a cat."
>>> processor = AutoProcessor.from_pretrained("IDEA-Research/grounding-dino-tiny")
>>> model = AutoModel.from_pretrained("IDEA-Research/grounding-dino-tiny")
>>> inputs = processor(images=image, text=text, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 900, 256]
```'''
pass
````

**metrics:**

| metric | value |
|---|---|
| total_program_units | 9 |
| total_doc_str | 3 |
| AvgCountLine | 47 |
| AvgCountLineBlank | 6 |
| AvgCountLineCode | 35 |
| AvgCountLineComment | 7 |
| AvgCyclomatic | 5 |
| CommentToCodeRatio | 0.19 |
| CountClassBase | 1 |
| CountClassCoupled | 18 |
| CountClassCoupledModified | 10 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 8 |
| CountDeclInstanceVariable | 13 |
| CountDeclMethod | 8 |
| CountDeclMethodAll | 10 |
| CountLine | 386 |
| CountLineBlank | 51 |
| CountLineCode | 283 |
| CountLineCodeDecl | 101 |
| CountLineCodeExe | 261 |
| CountLineComment | 54 |
| CountStmt | 173 |
| CountStmtDecl | 89 |
| CountStmtExe | 164 |
| MaxCyclomatic | 23 |
| MaxInheritanceTree | 2 |
| MaxNesting | 3 |
| SumCyclomatic | 39 |

---

- **id:** 2,903
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/modeling_grounding_dino.py`
- **class_name:** `transformers.models.grounding_dino.modeling_grounding_dino.GroundingDinoModelOutput`

**human_written_code:**

```python
from dataclasses import dataclass
import torch.nn.functional as F
import torch
from ...file_utils import ModelOutput, is_timm_available, requires_backends
from ...utils import auto_docstring, logging
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of the Grounding DINO encoder-decoder model.\n ')
class GroundingDinoModelOutput(ModelOutput):
"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Initial reference points sent through the Transformer decoder.
intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
Stacked intermediate hidden states (output of each layer of the decoder).
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked intermediate reference points (reference points of each layer of the decoder).
encoder_last_hidden_state_vision (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_last_hidden_state_text (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the vision embeddings + one for the output of each
layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision encoder at the
output of each layer plus the initial embedding outputs.
encoder_text_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the text embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the text encoder at the output of
each layer plus the initial embedding outputs.
encoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of tuples of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads,
        sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the
        weighted average in the text-vision attention, vision-text attention, text-enhancer (self-attention) and
        multi-scale deformable attention heads.
enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Predicted bounding boxes scores where the top `config.num_queries` scoring bounding boxes are picked as
region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and
background).
enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Logits of predicted bounding boxes coordinates in the first stage.
encoder_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Logits of top `config.num_queries` scoring bounding boxes in the first stage.
encoder_pred_boxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Coordinates of top `config.num_queries` scoring bounding boxes in the first stage.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
init_reference_points: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[torch.FloatTensor] = None
intermediate_reference_points: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
encoder_last_hidden_state_vision: Optional[torch.FloatTensor] = None
encoder_last_hidden_state_text: Optional[torch.FloatTensor] = None
encoder_vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_text_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
enc_outputs_class: Optional[torch.FloatTensor] = None
enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
encoder_logits: Optional[torch.FloatTensor] = None
encoder_pred_boxes: Optional[torch.FloatTensor] = None
```

**class_skeleton:**

```python
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of the Grounding DINO encoder-decoder model.\n ')
class GroundingDinoModelOutput(ModelOutput):
'''
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Initial reference points sent through the Transformer decoder.
intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
Stacked intermediate hidden states (output of each layer of the decoder).
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked intermediate reference points (reference points of each layer of the decoder).
encoder_last_hidden_state_vision (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_last_hidden_state_text (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the vision embeddings + one for the output of each
layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision encoder at the
output of each layer plus the initial embedding outputs.
encoder_text_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the text embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the text encoder at the output of
each layer plus the initial embedding outputs.
encoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of tuples of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads,
        sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the
        weighted average in the text-vision attention, vision-text attention, text-enhancer (self-attention) and
        multi-scale deformable attention heads.
enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Predicted bounding boxes scores where the top `config.num_queries` scoring bounding boxes are picked as
region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and
background).
enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Logits of predicted bounding boxes coordinates in the first stage.
encoder_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Logits of top `config.num_queries` scoring bounding boxes in the first stage.
encoder_pred_boxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Coordinates of top `config.num_queries` scoring bounding boxes in the first stage.
'''
pass
```

**metrics:**

| metric | value |
|---|---|
| total_program_units | 3 |
| total_doc_str | 1 |
| AvgCountLine | 0 |
| AvgCountLineBlank | 0 |
| AvgCountLineCode | 0 |
| AvgCountLineComment | 0 |
| AvgCyclomatic | 0 |
| CommentToCodeRatio | 3.14 |
| CountClassBase | 1 |
| CountClassCoupled | 0 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 0 |
| CountDeclInstanceVariable | 0 |
| CountDeclMethod | 0 |
| CountDeclMethodAll | 0 |
| CountLine | 60 |
| CountLineBlank | 2 |
| CountLineCode | 14 |
| CountLineCodeDecl | 14 |
| CountLineCodeExe | 13 |
| CountLineComment | 44 |
| CountStmt | 14 |
| CountStmtDecl | 14 |
| CountStmtExe | 13 |
| MaxCyclomatic | 0 |
| MaxInheritanceTree | 1 |
| MaxNesting | 0 |
| SumCyclomatic | 0 |
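Since this row is a pure output dataclass, a short sketch of how `ModelOutput` subclasses behave may help; it assumes the class above is in scope and relies on the standard `transformers` `ModelOutput` semantics (fields left as `None` are dropped from the dict/tuple views):

```python
import torch

# Construct the output with only two fields; the rest default to None.
out = GroundingDinoModelOutput(
    last_hidden_state=torch.zeros(1, 900, 256),
    init_reference_points=torch.zeros(1, 900, 4),
)

print(out.last_hidden_state.shape)         # attribute access
print(out["init_reference_points"].shape)  # dict-style access by field name
print(len(out.to_tuple()))                 # 2 -- None-valued fields are omitted
```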

---

- **id:** 2,904
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/modeling_grounding_dino.py`
- **class_name:** `transformers.models.grounding_dino.modeling_grounding_dino.GroundingDinoMultiheadAttention`

**human_written_code:**

```python
import math
import torch
from typing import Optional, Union
from torch import Tensor, nn
import torch.nn.functional as F
class GroundingDinoMultiheadAttention(nn.Module):
"""Equivalent implementation of nn.MultiheadAttention with `batch_first=True`."""
def __init__(self, config, num_attention_heads=None):
super().__init__()
if config.hidden_size % num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({num_attention_heads})')
self.num_attention_heads = num_attention_heads
self.attention_head_size = int(config.hidden_size / num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.attention_dropout)
def forward(self, queries: torch.Tensor, keys: torch.Tensor, values: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
batch_size, seq_length, _ = queries.shape
query_layer = self.query(queries).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = self.key(keys).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(values).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
context_layer = self.out_proj(context_layer)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
```

**class_skeleton:**

```python
class GroundingDinoMultiheadAttention(nn.Module):
'''Equivalent implementation of nn.MultiheadAttention with `batch_first=True`.'''
def __init__(self, config, num_attention_heads=None):
pass
def forward(self, queries: torch.Tensor, keys: torch.Tensor, values: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
```

**metrics:**

| metric | value |
|---|---|
| total_program_units | 3 |
| total_doc_str | 1 |
| AvgCountLine | 20 |
| AvgCountLineBlank | 4 |
| AvgCountLineCode | 14 |
| AvgCountLineComment | 2 |
| AvgCyclomatic | 2 |
| CommentToCodeRatio | 0.14 |
| CountClassBase | 1 |
| CountClassCoupled | 5 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 3 |
| CountDeclInstanceVariable | 8 |
| CountDeclMethod | 3 |
| CountDeclMethodAll | 13 |
| CountLine | 66 |
| CountLineBlank | 16 |
| CountLineCode | 44 |
| CountLineCodeDecl | 28 |
| CountLineCodeExe | 33 |
| CountLineComment | 6 |
| CountStmt | 34 |
| CountStmtDecl | 21 |
| CountStmtExe | 30 |
| MaxCyclomatic | 3 |
| MaxInheritanceTree | 1 |
| MaxNesting | 1 |
| SumCyclomatic | 6 |
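A minimal sketch of the attention module above; the config stub (`hidden_size`, `attention_dropout`) and all sizes are illustrative assumptions:

```python
import torch
from types import SimpleNamespace

# Hypothetical config stub; the module reads hidden_size and attention_dropout.
config = SimpleNamespace(hidden_size=256, attention_dropout=0.0)
attn = GroundingDinoMultiheadAttention(config, num_attention_heads=8)

# Batch-first layout, as with nn.MultiheadAttention(batch_first=True).
x = torch.randn(2, 10, 256)  # (batch, seq_len, hidden)
context, probs = attn(queries=x, keys=x, values=x, output_attentions=True)
print(context.shape, probs.shape)  # torch.Size([2, 10, 256]) torch.Size([2, 8, 10, 10])
```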

---

- **id:** 2,905
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/modeling_grounding_dino.py`
- **class_name:** `transformers.models.grounding_dino.modeling_grounding_dino.GroundingDinoMultiscaleDeformableAttention`

**human_written_code:**

```python
import torch
from typing import Optional, Union
from .configuration_grounding_dino import GroundingDinoConfig
from torch import Tensor, nn
import warnings
import torch.nn.functional as F
class GroundingDinoMultiscaleDeformableAttention(nn.Module):
"""
Multiscale deformable attention as proposed in Deformable DETR.
"""
def __init__(self, config: GroundingDinoConfig, num_heads: int, n_points: int):
super().__init__()
self.attn = MultiScaleDeformableAttention()
if config.d_model % num_heads != 0:
raise ValueError(f'embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}')
dim_per_head = config.d_model // num_heads
if not (dim_per_head & dim_per_head - 1 == 0 and dim_per_head != 0):
warnings.warn("You'd better set embed_dim (d_model) in GroundingDinoMultiscaleDeformableAttention to make the dimension of each attention head a power of 2 which is more efficient in the authors' CUDA implementation.")
self.im2col_step = 64
self.d_model = config.d_model
self.n_levels = config.num_feature_levels
self.n_heads = num_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2)
self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points)
self.value_proj = nn.Linear(config.d_model, config.d_model)
self.output_proj = nn.Linear(config.d_model, config.d_model)
self.disable_custom_kernels = config.disable_custom_kernels
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False):
if position_embeddings is not None:
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
batch_size, num_queries, _ = hidden_states.shape
batch_size, sequence_length, _ = encoder_hidden_states.shape
if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length:
raise ValueError('Make sure to align the spatial shapes with the sequence length of the encoder hidden states')
value = self.value_proj(encoder_hidden_states)
if attention_mask is not None:
value = value.masked_fill(~attention_mask[..., None], float(0))
value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points)
num_coordinates = reference_points.shape[-1]
if num_coordinates == 2:
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif num_coordinates == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(f'Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}')
output = self.attn(value, spatial_shapes, spatial_shapes_list, level_start_index, sampling_locations, attention_weights, self.im2col_step)
output = self.output_proj(output)
return (output, attention_weights)
```

**class_skeleton:**

```python
class GroundingDinoMultiscaleDeformableAttention(nn.Module):
'''
Multiscale deformable attention as proposed in Deformable DETR.
'''
def __init__(self, config: GroundingDinoConfig, num_heads: int, n_points: int):
pass
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False):
pass
```

**metrics:**

| metric | value |
|---|---|
| total_program_units | 4 |
| total_doc_str | 1 |
| AvgCountLine | 38 |
| AvgCountLineBlank | 3 |
| AvgCountLineCode | 32 |
| AvgCountLineComment | 3 |
| AvgCyclomatic | 5 |
| CommentToCodeRatio | 0.11 |
| CountClassBase | 1 |
| CountClassCoupled | 9 |
| CountClassCoupledModified | 2 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 3 |
| CountDeclInstanceVariable | 10 |
| CountDeclMethod | 3 |
| CountDeclMethodAll | 13 |
| CountLine | 120 |
| CountLineBlank | 13 |
| CountLineCode | 96 |
| CountLineCodeDecl | 38 |
| CountLineCodeExe | 80 |
| CountLineComment | 11 |
| CountStmt | 54 |
| CountStmtDecl | 25 |
| CountStmtExe | 50 |
| MaxCyclomatic | 8 |
| MaxInheritanceTree | 1 |
| MaxNesting | 2 |
| SumCyclomatic | 15 |
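The sampling-location arithmetic in `forward` can be checked in isolation. This standalone replica covers only the `num_coordinates == 2` branch, and every size in it is an illustrative assumption:

```python
import torch

batch_size, num_queries, n_heads, n_levels, n_points = 2, 3, 8, 2, 4
spatial_shapes = torch.tensor([[32, 32], [16, 16]])  # (height, width) per feature level
reference_points = torch.rand(batch_size, num_queries, n_levels, 2)
sampling_offsets = torch.randn(batch_size, num_queries, n_heads, n_levels, n_points, 2)

# Offsets are normalized per level by that level's (width, height).
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = (
    reference_points[:, :, None, :, None, :]
    + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
print(sampling_locations.shape)  # torch.Size([2, 3, 8, 2, 4, 2])
```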

---

- **id:** 2,906
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/modeling_grounding_dino.py`
- **class_name:** `transformers.models.grounding_dino.modeling_grounding_dino.GroundingDinoObjectDetectionOutput`

**human_written_code:**

```python
from dataclasses import dataclass
import torch.nn.functional as F
import torch
from ...file_utils import ModelOutput, is_timm_available, requires_backends
from ...utils import auto_docstring, logging
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Output type of [`GroundingDinoForObjectDetection`].\n ')
class GroundingDinoObjectDetectionOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
        Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~GroundingDinoProcessor.post_process_grounded_object_detection`] to retrieve the
unnormalized bounding boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Initial reference points sent through the Transformer decoder.
intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
Stacked intermediate hidden states (output of each layer of the decoder).
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked intermediate reference points (reference points of each layer of the decoder).
encoder_last_hidden_state_vision (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_last_hidden_state_text (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the vision embeddings + one for the output of each
layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision encoder at the
output of each layer plus the initial embedding outputs.
encoder_text_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the text embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the text encoder at the output of
each layer plus the initial embedding outputs.
enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Predicted bounding boxes scores where the top `config.num_queries` scoring bounding boxes are picked as
region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and
background).
enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Logits of predicted bounding boxes coordinates in the first stage.
encoder_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Logits of top `config.num_queries` scoring bounding boxes in the first stage.
encoder_pred_boxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Coordinates of top `config.num_queries` scoring bounding boxes in the first stage.
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Encoded candidate labels sequence. Used in processor to post process object detection result.
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
auxiliary_outputs: Optional[list[dict]] = None
last_hidden_state: Optional[torch.FloatTensor] = None
init_reference_points: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[torch.FloatTensor] = None
intermediate_reference_points: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
encoder_last_hidden_state_vision: Optional[torch.FloatTensor] = None
encoder_last_hidden_state_text: Optional[torch.FloatTensor] = None
encoder_vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_text_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
enc_outputs_class: Optional[torch.FloatTensor] = None
enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
encoder_logits: Optional[torch.FloatTensor] = None
encoder_pred_boxes: Optional[torch.FloatTensor] = None
input_ids: Optional[torch.LongTensor] = None
```

**class_skeleton:**

```python
@dataclass
@auto_docstring(custom_intro='\n Output type of [`GroundingDinoForObjectDetection`].\n ')
class GroundingDinoObjectDetectionOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
        Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~GroundingDinoProcessor.post_process_grounded_object_detection`] to retrieve the
unnormalized bounding boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Initial reference points sent through the Transformer decoder.
intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
Stacked intermediate hidden states (output of each layer of the decoder).
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked intermediate reference points (reference points of each layer of the decoder).
encoder_last_hidden_state_vision (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_last_hidden_state_text (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the vision embeddings + one for the output of each
layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision encoder at the
output of each layer plus the initial embedding outputs.
encoder_text_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the text embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the text encoder at the output of
each layer plus the initial embedding outputs.
enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Predicted bounding boxes scores where the top `config.num_queries` scoring bounding boxes are picked as
region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and
background).
enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Logits of predicted bounding boxes coordinates in the first stage.
encoder_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Logits of top `config.num_queries` scoring bounding boxes in the first stage.
encoder_pred_boxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Coordinates of top `config.num_queries` scoring bounding boxes in the first stage.
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Encoded candidate labels sequence. Used in processor to post process object detection result.
'''
pass
```

**metrics:**

| metric | value |
|---|---|
| total_program_units | 3 |
| total_doc_str | 1 |
| AvgCountLine | 0 |
| AvgCountLineBlank | 0 |
| AvgCountLineCode | 0 |
| AvgCountLineComment | 0 |
| AvgCyclomatic | 0 |
| CommentToCodeRatio | 3.1 |
| CountClassBase | 1 |
| CountClassCoupled | 0 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 0 |
| CountDeclInstanceVariable | 0 |
| CountDeclMethod | 0 |
| CountDeclMethodAll | 0 |
| CountLine | 84 |
| CountLineBlank | 2 |
| CountLineCode | 20 |
| CountLineCodeDecl | 20 |
| CountLineCodeExe | 19 |
| CountLineComment | 62 |
| CountStmt | 20 |
| CountStmtDecl | 20 |
| CountStmtExe | 19 |
| MaxCyclomatic | 0 |
| MaxInheritanceTree | 1 |
| MaxNesting | 0 |
| SumCyclomatic | 0 |

---

- **id:** 2,907
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/modeling_grounding_dino.py`
- **class_name:** `transformers.models.grounding_dino.modeling_grounding_dino.GroundingDinoPreTrainedModel`

**human_written_code:**

```python
import math
import torch
from ...utils import auto_docstring, logging
from .configuration_grounding_dino import GroundingDinoConfig
from ...modeling_utils import PreTrainedModel
from torch import Tensor, nn
import torch.nn.functional as F
@auto_docstring
class GroundingDinoPreTrainedModel(PreTrainedModel):
config: GroundingDinoConfig
base_model_prefix = 'model'
main_input_name = 'pixel_values'
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, GroundingDinoLearnedPositionEmbedding):
nn.init.uniform_(module.row_embeddings.weight)
nn.init.uniform_(module.column_embeddings.weight)
elif isinstance(module, GroundingDinoMultiscaleDeformableAttention):
nn.init.constant_(module.sampling_offsets.weight.data, 0.0)
default_dtype = torch.get_default_dtype()
thetas = torch.arange(module.n_heads, dtype=torch.int64).to(default_dtype) * (2.0 * math.pi / module.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(module.n_heads, 1, 1, 2).repeat(1, module.n_levels, module.n_points, 1)
for i in range(module.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
module.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
nn.init.constant_(module.attention_weights.weight.data, 0.0)
nn.init.constant_(module.attention_weights.bias.data, 0.0)
nn.init.xavier_uniform_(module.value_proj.weight.data)
nn.init.constant_(module.value_proj.bias.data, 0.0)
nn.init.xavier_uniform_(module.output_proj.weight.data)
nn.init.constant_(module.output_proj.bias.data, 0.0)
elif isinstance(module, GroundingDinoBiMultiHeadAttention):
nn.init.xavier_uniform_(module.vision_proj.weight)
module.vision_proj.bias.data.fill_(0)
nn.init.xavier_uniform_(module.text_proj.weight)
module.text_proj.bias.data.fill_(0)
nn.init.xavier_uniform_(module.values_vision_proj.weight)
module.values_vision_proj.bias.data.fill_(0)
nn.init.xavier_uniform_(module.values_text_proj.weight)
module.values_text_proj.bias.data.fill_(0)
nn.init.xavier_uniform_(module.out_vision_proj.weight)
module.out_vision_proj.bias.data.fill_(0)
nn.init.xavier_uniform_(module.out_text_proj.weight)
module.out_text_proj.bias.data.fill_(0)
elif isinstance(module, GroundingDinoFusionLayer):
module.vision_param.data.fill_(0.0001)
module.text_param.data.fill_(0.0001)
elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, GroundingDinoMLPPredictionHead):
nn.init.constant_(module.layers[-1].weight.data, 0)
nn.init.constant_(module.layers[-1].bias.data, 0)
if hasattr(module, 'reference_points') and (not self.config.two_stage):
nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0)
nn.init.constant_(module.reference_points.bias.data, 0.0)
if hasattr(module, 'level_embed'):
nn.init.normal_(module.level_embed)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, GroundingDinoDecoder):
module.gradient_checkpointing = value
```

**class_skeleton:**

```python
@auto_docstring
class GroundingDinoPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
def _set_gradient_checkpointing(self, module, value=False):
pass
```

**metrics:**

| metric | value |
|---|---|
| total_program_units | 4 |
| total_doc_str | 0 |
| AvgCountLine | 34 |
| AvgCountLineBlank | 1 |
| AvgCountLineCode | 32 |
| AvgCountLineComment | 1 |
| AvgCyclomatic | 9 |
| CommentToCodeRatio | 0.03 |
| CountClassBase | 1 |
| CountClassCoupled | 8 |
| CountClassCoupledModified | 7 |
| CountClassDerived | 4 |
| CountDeclInstanceMethod | 2 |
| CountDeclInstanceVariable | 0 |
| CountDeclMethod | 2 |
| CountDeclMethodAll | 2 |
| CountLine | 73 |
| CountLineBlank | 4 |
| CountLineCode | 67 |
| CountLineCodeDecl | 12 |
| CountLineCodeExe | 64 |
| CountLineComment | 2 |
| CountStmt | 55 |
| CountStmtDecl | 12 |
| CountStmtExe | 52 |
| MaxCyclomatic | 15 |
| MaxInheritanceTree | 1 |
| MaxNesting | 3 |
| SumCyclomatic | 17 |
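The `GroundingDinoMultiscaleDeformableAttention` branch of `_init_weights` gives each attention head a distinct initial sampling direction. A standalone replica of just that computation (the head/level/point counts are illustrative assumptions):

```python
import math
import torch

n_heads, n_levels, n_points = 8, 4, 4
thetas = torch.arange(n_heads, dtype=torch.float32) * (2.0 * math.pi / n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
# Normalize each direction to unit max-coordinate, then tile over levels and points.
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(n_heads, 1, 1, 2).repeat(1, n_levels, n_points, 1)
for i in range(n_points):
    grid_init[:, :, i, :] *= i + 1  # later points start further from the reference
print(grid_init.shape)  # torch.Size([8, 4, 4, 2])
print(grid_init[0, 0])  # head 0 points along +x: (1,0), (2,0), (3,0), (4,0)
```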

---

- **id:** 2,908
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/modeling_grounding_dino.py`
- **class_name:** `transformers.models.grounding_dino.modeling_grounding_dino.GroundingDinoSinePositionEmbedding`

**human_written_code:**

```python
import math
from torch import Tensor, nn
import torch
import torch.nn.functional as F
class GroundingDinoSinePositionEmbedding(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
"""
def __init__(self, config):
super().__init__()
self.embedding_dim = config.d_model // 2
self.temperature = config.positional_embedding_temperature
self.scale = 2 * math.pi
def forward(self, pixel_values, pixel_mask):
y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
eps = 1e-06
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.embedding_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
```

**class_skeleton:**

```python
class GroundingDinoSinePositionEmbedding(nn.Module):
'''
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
'''
def __init__(self, config):
pass
def forward(self, pixel_values, pixel_mask):
pass
```

**metrics:**

| metric | value |
|---|---|
| total_program_units | 3 |
| total_doc_str | 1 |
| AvgCountLine | 11 |
| AvgCountLineBlank | 1 |
| AvgCountLineCode | 10 |
| AvgCountLineComment | 0 |
| AvgCyclomatic | 1 |
| CommentToCodeRatio | 0.2 |
| CountClassBase | 1 |
| CountClassCoupled | 1 |
| CountClassCoupledModified | 0 |
| CountClassDerived | 0 |
| CountDeclInstanceMethod | 2 |
| CountDeclInstanceVariable | 3 |
| CountDeclMethod | 2 |
| CountDeclMethodAll | 12 |
| CountLine | 28 |
| CountLineBlank | 4 |
| CountLineCode | 20 |
| CountLineCodeDecl | 13 |
| CountLineCodeExe | 17 |
| CountLineComment | 4 |
| CountStmt | 20 |
| CountStmtDecl | 13 |
| CountStmtExe | 17 |
| MaxCyclomatic | 1 |
| MaxInheritanceTree | 1 |
| MaxNesting | 0 |
| SumCyclomatic | 2 |
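A minimal usage sketch of the sine embedding; the config stub fields (`d_model`, `positional_embedding_temperature`) and tensor sizes are illustrative assumptions:

```python
import torch
from types import SimpleNamespace

# Hypothetical config stub; the module reads d_model and positional_embedding_temperature.
config = SimpleNamespace(d_model=256, positional_embedding_temperature=10000)
embed = GroundingDinoSinePositionEmbedding(config)

pixel_values = torch.randn(2, 3, 32, 32)              # (batch, channels, height, width)
pixel_mask = torch.ones(2, 32, 32, dtype=torch.long)  # 1 = real pixel, 0 = padding
pos = embed(pixel_values, pixel_mask)
print(pos.shape)  # torch.Size([2, 256, 32, 32])
```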

---

- **id:** 2,909
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/modeling_grounding_dino.py`
- **class_name:** `transformers.models.grounding_dino.modeling_grounding_dino.GroundingDinoTextEnhancerLayer`

**human_written_code:**

```python
from torch import Tensor, nn
import torch.nn.functional as F
import torch
from typing import Optional, Union
from ...activations import ACT2FN
class GroundingDinoTextEnhancerLayer(nn.Module):
"""Vanilla Transformer with text embeddings as input"""
def __init__(self, config):
super().__init__()
self.self_attn = GroundingDinoMultiheadAttention(config, num_attention_heads=config.encoder_attention_heads // 2)
self.fc1 = nn.Linear(config.d_model, config.encoder_ffn_dim // 2)
self.fc2 = nn.Linear(config.encoder_ffn_dim // 2, config.d_model)
self.layer_norm_before = nn.LayerNorm(config.d_model, config.layer_norm_eps)
self.layer_norm_after = nn.LayerNorm(config.d_model, config.layer_norm_eps)
self.activation = ACT2FN[config.activation_function]
self.num_heads = config.encoder_attention_heads // 2
self.dropout = config.text_enhancer_dropout
def with_pos_embed(self, hidden_state: Tensor, position_embeddings: Optional[Tensor]):
return hidden_state if position_embeddings is None else hidden_state + position_embeddings
def forward(self, hidden_states: torch.FloatTensor, attention_masks: Optional[torch.BoolTensor]=None, position_embeddings: Optional[torch.FloatTensor]=None) -> tuple[torch.FloatTensor, torch.FloatTensor]:
"""Text self-attention to enhance projection of text features generated by
the text encoder (AutoModel based on text_config) within GroundingDinoEncoderLayer
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`):
Text features generated by the text encoder.
attention_masks (`torch.BoolTensor`, *optional*):
Attention mask for text self-attention. False for real tokens and True for padding tokens.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings to be added to the hidden states.
Returns:
`tuple(torch.FloatTensor)` comprising two elements:
- **hidden_states** (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) --
Output of the text self-attention layer.
- **attention_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length,
sequence_length)`) --
Attention weights of the text self-attention layer.
"""
if attention_masks.dim() == 3 and attention_masks.shape[0] == hidden_states.shape[0]:
attention_masks = attention_masks[:, None, :, :]
attention_masks = attention_masks.repeat(1, self.num_heads, 1, 1)
dtype = hidden_states.dtype
attention_masks = attention_masks.to(dtype=dtype)
attention_masks = (1.0 - attention_masks) * torch.finfo(dtype).min
queries = keys = self.with_pos_embed(hidden_states, position_embeddings)
attention_output, attention_weights = self.self_attn(queries=queries, keys=keys, values=hidden_states, attention_mask=attention_masks, output_attentions=True)
attention_output = nn.functional.dropout(attention_output, p=self.dropout, training=self.training)
hidden_states = hidden_states + attention_output
hidden_states = self.layer_norm_before(hidden_states)
residual = hidden_states
hidden_states = self.activation(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = hidden_states + residual
hidden_states = self.layer_norm_after(hidden_states)
return (hidden_states, attention_weights)
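The mask handling at the top of `forward` converts a multiplicative 0/1 attention mask into the additive form expected by softmax attention. The snippet below is a self-contained sketch of just that conversion, using made-up shapes.
import torch
batch_size, num_heads, seq_len = 2, 4, 6
# 1 for tokens to attend to, 0 for padding (made-up example mask).
attention_masks = torch.ones(batch_size, 1, seq_len, seq_len)
attention_masks[:, :, :, -2:] = 0  # last two positions are padding
attention_masks = attention_masks.repeat(1, num_heads, 1, 1)
dtype = torch.float32
additive_mask = (1.0 - attention_masks.to(dtype)) * torch.finfo(dtype).min
# Padding positions now hold a large negative value, so softmax drives
# their attention weights to (effectively) zero.
print(additive_mask[0, 0, 0])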
|
class GroundingDinoTextEnhancerLayer(nn.Module):
'''Vanilla Transformer with text embeddings as input'''
def __init__(self, config):
pass
def with_pos_embed(self, hidden_state: Tensor, position_embeddings: Optional[Tensor]):
pass
def forward(self, hidden_states: torch.FloatTensor, attention_masks: Optional[torch.BoolTensor]=None, position_embeddings: Optional[torch.FloatTensor]=None) -> tuple[torch.FloatTensor, torch.FloatTensor]:
'''Text self-attention to enhance projection of text features generated by
the text encoder (AutoModel based on text_config) within GroundingDinoEncoderLayer
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`):
Text features generated by the text encoder.
attention_masks (`torch.BoolTensor`, *optional*):
Attention mask for text self-attention. False for real tokens and True for padding tokens.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings to be added to the hidden states.
Returns:
`tuple(torch.FloatTensor)` comprising two elements:
- **hidden_states** (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`) --
Output of the text self-attention layer.
- **attention_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length,
sequence_length)`) --
Attention weights of the text self-attention layer.
'''
pass
| 4
| 2
| 25
| 3
| 15
| 7
| 2
| 0.48
| 1
| 3
| 1
| 0
| 3
| 8
| 3
| 13
| 80
| 13
| 46
| 21
| 37
| 22
| 33
| 16
| 29
| 2
| 1
| 1
| 5
|
2,910
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/processing_grounding_dino.py
|
transformers.models.grounding_dino.processing_grounding_dino.DictWithDeprecationWarning
|
import warnings
class DictWithDeprecationWarning(dict):
message = 'The key `labels` will return integer ids in `GroundingDinoProcessor.post_process_grounded_object_detection` output since v4.51.0. Use `text_labels` instead to retrieve string object names.'
def __getitem__(self, key):
if key == 'labels':
warnings.warn(self.message, FutureWarning)
return super().__getitem__(key)
def get(self, key, *args, **kwargs):
if key == 'labels':
warnings.warn(self.message, FutureWarning)
return super().get(key, *args, **kwargs)
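A quick illustration of the deprecation behavior: reading the `labels` key triggers a `FutureWarning`, while `text_labels` does not. The values here are made up.
import warnings
result = DictWithDeprecationWarning({"labels": ["a cat"], "text_labels": ["a cat"]})
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = result["labels"]          # emits FutureWarning
    _ = result["text_labels"]     # no warning
    _ = result.get("labels")      # also emits FutureWarning
print(len(caught))  # 2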
|
class DictWithDeprecationWarning(dict):
def __getitem__(self, key):
pass
def get(self, key, *args, **kwargs):
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 2
| 0
| 1
| 2
| 0
| 0
| 2
| 0
| 2
| 29
| 15
| 2
| 13
| 4
| 10
| 0
| 10
| 4
| 7
| 2
| 2
| 1
| 4
|
2,911
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/processing_grounding_dino.py
|
transformers.models.grounding_dino.processing_grounding_dino.GroundingDinoImagesKwargs
|
from ...image_utils import AnnotationFormat, ImageInput
import pathlib
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from typing import TYPE_CHECKING, Optional, Union
class GroundingDinoImagesKwargs(ImagesKwargs, total=False):
annotations: Optional[Union[AnnotationType, list[AnnotationType]]]
return_segmentation_masks: Optional[bool]
masks_path: Optional[Union[str, pathlib.Path]]
do_convert_annotations: Optional[bool]
format: Optional[Union[str, AnnotationFormat]]
|
class GroundingDinoImagesKwargs(ImagesKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0
| 6
| 1
| 5
| 0
| 6
| 1
| 5
| 0
| 2
| 0
| 0
|
2,912
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/processing_grounding_dino.py
|
transformers.models.grounding_dino.processing_grounding_dino.GroundingDinoProcessor
|
from ...image_utils import AnnotationFormat, ImageInput
from ...utils import TensorType, is_torch_available
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from ...image_transforms import center_to_corners_format
from typing import TYPE_CHECKING, Optional, Union
from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput
class GroundingDinoProcessor(ProcessorMixin):
"""
Constructs a Grounding DINO processor which wraps a Deformable DETR image processor and a BERT tokenizer into a
single processor.
[`GroundingDinoProcessor`] offers all the functionalities of [`GroundingDinoImageProcessor`] and
[`AutoTokenizer`]. See the docstring of [`~GroundingDinoProcessor.__call__`] and [`~GroundingDinoProcessor.decode`]
for more information.
Args:
image_processor (`GroundingDinoImageProcessor`):
An instance of [`GroundingDinoImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'GroundingDinoImageProcessor'
tokenizer_class = 'AutoTokenizer'
valid_processor_kwargs = GroundingDinoProcessorKwargs
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, **kwargs: Unpack[GroundingDinoProcessorKwargs]) -> BatchEncoding:
"""
This method uses [`GroundingDinoImageProcessor.__call__`] method to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Args:
images (`ImageInput`, `list[ImageInput]`, *optional*):
The image or batch of images to be processed. The image might be either PIL image, numpy array or a torch tensor.
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`, *optional*):
Candidate labels to be detected on the image. The text might be one of the following:
- A list of candidate labels (strings) to be detected on the image (e.g. ["a cat", "a dog"]).
- A batch of candidate labels to be detected on the batch of images (e.g. [["a cat", "a dog"], ["a car", "a person"]]).
- A merged candidate labels string to be detected on the image, separated by "." (e.g. "a cat. a dog.").
- A batch of merged candidate labels text to be detected on the batch of images (e.g. ["a cat. a dog.", "a car. a person."]).
"""
if text is not None:
text = self._preprocess_input_text(text)
return super().__call__(images=images, text=text, **kwargs)
def _preprocess_input_text(self, text):
"""
Preprocess input text to ensure that labels are in the correct format for the model.
If the text is a list of candidate labels, merge the candidate labels into a single string,
for example, ["a cat", "a dog"] -> "a cat. a dog.". In case candidate labels are already in a form of
"a cat. a dog.", the text is returned as is.
"""
if _is_list_of_candidate_labels(text):
text = _merge_candidate_labels_text(text)
elif isinstance(text, (list, tuple)) and all((_is_list_of_candidate_labels(t) for t in text)):
text = [_merge_candidate_labels_text(sample) for sample in text]
return text
def post_process_grounded_object_detection(self, outputs: 'GroundingDinoObjectDetectionOutput', input_ids: Optional[TensorType]=None, threshold: float=0.25, text_threshold: float=0.25, target_sizes: Optional[Union[TensorType, list[tuple]]]=None, text_labels: Optional[list[list[str]]]=None):
"""
Converts the raw output of [`GroundingDinoForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format and get the associated text label.
Args:
outputs ([`GroundingDinoObjectDetectionOutput`]):
Raw outputs of the model.
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
The token ids of the input text. If not provided will be taken from the model output.
threshold (`float`, *optional*, defaults to 0.25):
Threshold to keep object detection predictions based on confidence score.
text_threshold (`float`, *optional*, defaults to 0.25):
Score threshold to keep text detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
text_labels (`list[list[str]]`, *optional*):
List of candidate labels to be detected on each image. At the moment it's *NOT used*, but required
to be in signature for the zero-shot object detection pipeline. Text labels are instead extracted
from the `input_ids` tensor provided in `outputs`.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the
- **scores**: tensor of confidence scores for detected objects
- **boxes**: tensor of bounding boxes in [x0, y0, x1, y1] format
- **labels**: list of text labels for each detected object (will be replaced with integer ids in v4.51.0)
- **text_labels**: list of text labels for detected objects
"""
batch_logits, batch_boxes = (outputs.logits, outputs.pred_boxes)
input_ids = input_ids if input_ids is not None else outputs.input_ids
if target_sizes is not None and len(target_sizes) != len(batch_logits):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
batch_probs = torch.sigmoid(batch_logits)
batch_scores = torch.max(batch_probs, dim=-1)[0]
batch_boxes = center_to_corners_format(batch_boxes)
if target_sizes is not None:
if isinstance(target_sizes, list):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(batch_boxes.device)
batch_boxes = batch_boxes * scale_fct[:, None, :]
results = []
for idx, (scores, boxes, probs) in enumerate(zip(batch_scores, batch_boxes, batch_probs)):
keep = scores > threshold
scores = scores[keep]
boxes = boxes[keep]
prob = probs[keep]
label_ids = get_phrases_from_posmap(prob > text_threshold, input_ids[idx])
objects_text_labels = self.batch_decode(label_ids)
result = DictWithDeprecationWarning({'scores': scores, 'boxes': boxes, 'text_labels': objects_text_labels, 'labels': objects_text_labels})
results.append(result)
return results
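To make the text-preprocessing contract concrete, the sketch below shows what merging candidate labels into a single "."-separated prompt looks like. The helper is a hypothetical re-implementation for illustration, not the module's actual `_merge_candidate_labels_text`.
def merge_candidate_labels(labels):
    # Hypothetical helper: ["a cat", "a dog"] -> "a cat. a dog."
    return " ".join(f"{label.strip().rstrip('.')}." for label in labels)
print(merge_candidate_labels(["a cat", "a dog"]))
# a cat. a dog.
batch = [["a cat", "a dog"], ["a car", "a person"]]
print([merge_candidate_labels(sample) for sample in batch])
# ['a cat. a dog.', 'a car. a person.']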
|
class GroundingDinoProcessor(ProcessorMixin):
'''
Constructs a Grounding DINO processor which wraps a Deformable DETR image processor and a BERT tokenizer into a
single processor.
[`GroundingDinoProcessor`] offers all the functionalities of [`GroundingDinoImageProcessor`] and
[`AutoTokenizer`]. See the docstring of [`~GroundingDinoProcessor.__call__`] and [`~GroundingDinoProcessor.decode`]
for more information.
Args:
image_processor (`GroundingDinoImageProcessor`):
An instance of [`GroundingDinoImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
'''
def __init__(self, image_processor, tokenizer):
pass
def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, **kwargs: Unpack[GroundingDinoProcessorKwargs]) -> BatchEncoding:
'''
This method uses [`GroundingDinoImageProcessor.__call__`] method to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Args:
images (`ImageInput`, `list[ImageInput]`, *optional*):
The image or batch of images to be processed. The image might be either PIL image, numpy array or a torch tensor.
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`, *optional*):
Candidate labels to be detected on the image. The text might be one of the following:
- A list of candidate labels (strings) to be detected on the image (e.g. ["a cat", "a dog"]).
- A batch of candidate labels to be detected on the batch of images (e.g. [["a cat", "a dog"], ["a car", "a person"]]).
- A merged candidate labels string to be detected on the image, separated by "." (e.g. "a cat. a dog.").
- A batch of merged candidate labels text to be detected on the batch of images (e.g. ["a cat. a dog.", "a car. a person."]).
'''
pass
def _preprocess_input_text(self, text):
'''
Preprocess input text to ensure that labels are in the correct format for the model.
If the text is a list of candidate labels, merge the candidate labels into a single string,
for example, ["a cat", "a dog"] -> "a cat. a dog.". In case candidate labels are already in a form of
"a cat. a dog.", the text is returned as is.
'''
pass
def post_process_grounded_object_detection(self, outputs: 'GroundingDinoObjectDetectionOutput', input_ids: Optional[TensorType]=None, threshold: float=0.25, text_threshold: float=0.25, target_sizes: Optional[Union[TensorType, list[tuple]]]=None, text_labels: Optional[list[list[str]]]=None):
'''
Converts the raw output of [`GroundingDinoForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format and get the associated text label.
Args:
outputs ([`GroundingDinoObjectDetectionOutput`]):
Raw outputs of the model.
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
The token ids of the input text. If not provided will be taken from the model output.
threshold (`float`, *optional*, defaults to 0.25):
Threshold to keep object detection predictions based on confidence score.
text_threshold (`float`, *optional*, defaults to 0.25):
Score threshold to keep text detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
text_labels (`list[list[str]]`, *optional*):
List of candidate labels to be detected on each image. At the moment it's *NOT used*, but required
to be in signature for the zero-shot object detection pipeline. Text labels are instead extracted
from the `input_ids` tensor provided in `outputs`.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the
- **scores**: tensor of confidence scores for detected objects
- **boxes**: tensor of bounding boxes in [x0, y0, x1, y1] format
- **labels**: list of text labels for each detected object (will be replaced with integer ids in v4.51.0)
- **text_labels**: list of text labels for detected objects
'''
pass
| 5
| 4
| 24
| 3
| 12
| 9
| 2
| 0.82
| 1
| 14
| 4
| 0
| 7
| 0
| 7
| 24
| 197
| 30
| 93
| 46
| 68
| 76
| 58
| 29
| 50
| 6
| 2
| 2
| 17
|
2,913
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/grounding_dino/processing_grounding_dino.py
|
transformers.models.grounding_dino.processing_grounding_dino.GroundingDinoProcessorKwargs
|
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
class GroundingDinoProcessorKwargs(ProcessingKwargs, total=False):
images_kwargs: GroundingDinoImagesKwargs
_defaults = {'text_kwargs': {'add_special_tokens': True, 'padding': False, 'stride': 0, 'return_overflowing_tokens': False, 'return_special_tokens_mask': False, 'return_offsets_mapping': False, 'return_token_type_ids': True, 'return_length': False, 'verbose': True}}
|
class GroundingDinoProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0
| 15
| 2
| 14
| 0
| 3
| 2
| 2
| 0
| 3
| 0
| 0
|
2,914
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/configuration_groupvit.py
|
transformers.models.groupvit.configuration_groupvit.GroupViTConfig
|
from ...configuration_utils import PretrainedConfig
class GroupViTConfig(PretrainedConfig):
"""
[`GroupViTConfig`] is the configuration class to store the configuration of a [`GroupViTModel`]. It is used to
instantiate a GroupViT model according to the specified arguments, defining the text model and vision model
configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT
[nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`GroupViTTextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`GroupViTVisionConfig`].
projection_dim (`int`, *optional*, defaults to 256):
Dimensionality of text and vision projection layers.
projection_intermediate_dim (`int`, *optional*, defaults to 4096):
Dimensionality of intermediate layer of text and vision projection layers.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
The initial value of the *logit_scale* parameter. Default is used as per the original GroupViT
implementation.
kwargs (*optional*):
Dictionary of keyword arguments.
"""
model_type = 'groupvit'
sub_configs = {'text_config': GroupViTTextConfig, 'vision_config': GroupViTVisionConfig}
def __init__(self, text_config=None, vision_config=None, projection_dim=256, projection_intermediate_dim=4096, logit_scale_init_value=2.6592, **kwargs):
text_config_dict = kwargs.pop('text_config_dict', None)
vision_config_dict = kwargs.pop('vision_config_dict', None)
super().__init__(**kwargs)
if text_config_dict is not None:
if text_config is None:
text_config = {}
_text_config_dict = GroupViTTextConfig(**text_config_dict).to_dict()
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and (key not in ['transformers_version']):
if key in text_config_dict:
message = f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict["{key}"]` will be used instead.'
else:
message = f'`text_config_dict` is provided which will be used to initialize `GroupViTTextConfig`. The value `text_config["{key}"]` will be overridden.'
logger.info(message)
text_config.update(_text_config_dict)
if vision_config_dict is not None:
if vision_config is None:
vision_config = {}
_vision_config_dict = GroupViTVisionConfig(**vision_config_dict).to_dict()
if 'id2label' in _vision_config_dict:
_vision_config_dict['id2label'] = {str(key): value for key, value in _vision_config_dict['id2label'].items()}
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and (key not in ['transformers_version']):
if key in vision_config_dict:
message = f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different values. The value `vision_config_dict["{key}"]` will be used instead.'
else:
message = f'`vision_config_dict` is provided which will be used to initialize `GroupViTVisionConfig`. The value `vision_config["{key}"]` will be overridden.'
logger.info(message)
vision_config.update(_vision_config_dict)
if text_config is None:
text_config = {}
logger.info('`text_config` is `None`. Initializing the `GroupViTTextConfig` with default values.')
if vision_config is None:
vision_config = {}
logger.info('`vision_config` is `None`. initializing the `GroupViTVisionConfig` with default values.')
self.text_config = GroupViTTextConfig(**text_config)
self.vision_config = GroupViTVisionConfig(**vision_config)
self.projection_dim = projection_dim
self.projection_intermediate_dim = projection_intermediate_dim
self.logit_scale_init_value = logit_scale_init_value
self.initializer_range = 0.02
self.initializer_factor = 1.0
self.output_segmentation = False
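A minimal sketch of composing this config from plain dictionaries; the override values below are arbitrary examples, and omitted keys fall back to the sub-config defaults.
config = GroupViTConfig(
    text_config={"hidden_size": 256, "num_hidden_layers": 6},
    vision_config={"hidden_size": 384, "depths": [6, 3, 3]},
    projection_dim=256,
)
print(config.text_config.num_hidden_layers)  # 6
print(config.vision_config.hidden_size)      # 384
print(config.projection_dim)                 # 256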
|
class GroupViTConfig(PretrainedConfig):
'''
[`GroupViTConfig`] is the configuration class to store the configuration of a [`GroupViTModel`]. It is used to
instantiate a GroupViT model according to the specified arguments, defining the text model and vision model
configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT
[nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`GroupViTTextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`GroupViTVisionConfig`].
projection_dim (`int`, *optional*, defaults to 256):
Dimensionality of text and vision projection layers.
projection_intermediate_dim (`int`, *optional*, defaults to 4096):
Dimensionality of intermediate layer of text and vision projection layers.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
The initial value of the *logit_scale* parameter. Default is used as per the original GroupViT
implementation.
kwargs (*optional*):
Dictionary of keyword arguments.
'''
def __init__(self, text_config=None, vision_config=None, projection_dim=256, projection_intermediate_dim=4096, logit_scale_init_value=2.6592, **kwargs):
pass
| 2
| 1
| 53
| 8
| 34
| 12
| 8
| 0.63
| 1
| 4
| 2
| 0
| 1
| 8
| 2
| 2
| 137
| 20
| 72
| 28
| 60
| 45
| 47
| 19
| 44
| 14
| 1
| 4
| 15
|
2,915
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/configuration_groupvit.py
|
transformers.models.groupvit.configuration_groupvit.GroupViTOnnxConfig
|
from ...onnx import OnnxConfig
from typing import TYPE_CHECKING, Any
from collections import OrderedDict
from collections.abc import Mapping
class GroupViTOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('attention_mask', {0: 'batch', 1: 'sequence'})])
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('logits_per_image', {0: 'batch'}), ('logits_per_text', {0: 'batch'}), ('text_embeds', {0: 'batch'}), ('image_embeds', {0: 'batch'})])
@property
def atol_for_validation(self) -> float:
return 0.0001
def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=-1, seq_length: int=-1) -> Mapping[str, Any]:
text_input_dict = super().generate_dummy_inputs(processor.tokenizer, batch_size=batch_size, seq_length=seq_length)
image_input_dict = super().generate_dummy_inputs(processor.image_processor, batch_size=batch_size)
return {**text_input_dict, **image_input_dict}
@property
def default_onnx_opset(self) -> int:
return 14
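One way to read the `inputs`/`outputs` mappings above is as dynamic-axis declarations for ONNX export. The sketch below merges them into the shape `torch.onnx.export` expects for its `dynamic_axes` argument (the export call itself is omitted); it assumes `GroupViTConfig` is importable here and that the base `OnnxConfig` accepts the model config as its first constructor argument.
onnx_config = GroupViTOnnxConfig(GroupViTConfig())
# e.g. {"input_ids": {0: "batch", 1: "sequence"}, "pixel_values": {...}, ...}
dynamic_axes = {**dict(onnx_config.inputs), **dict(onnx_config.outputs)}
print(dynamic_axes["pixel_values"])    # {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}
print(onnx_config.default_onnx_opset)  # 14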
|
class GroupViTOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=-1, seq_length: int=-1) -> Mapping[str, Any]:
pass
@property
def default_onnx_opset(self) -> int:
pass
| 10
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 6
| 0
| 0
| 5
| 0
| 5
| 5
| 44
| 4
| 40
| 18
| 24
| 0
| 13
| 8
| 7
| 1
| 1
| 0
| 5
|
2,916
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/configuration_groupvit.py
|
transformers.models.groupvit.configuration_groupvit.GroupViTTextConfig
|
from ...configuration_utils import PretrainedConfig
class GroupViTTextConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`GroupViTTextModel`]. It is used to instantiate a
GroupViT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the GroupViT
[nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size of the GroupViT text model. Defines the number of different tokens that can be represented
by the `inputs_ids` passed when calling [`GroupViTModel`].
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 1024):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 77):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import GroupViTTextConfig, GroupViTTextModel
>>> # Initializing a GroupViTTextModel with nvidia/groupvit-gcc-yfcc style configuration
>>> configuration = GroupViTTextConfig()
>>> model = GroupViTTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'groupvit_text_model'
base_config_key = 'text_config'
def __init__(self, vocab_size=49408, hidden_size=256, intermediate_size=1024, num_hidden_layers=12, num_attention_heads=4, max_position_embeddings=77, hidden_act='quick_gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=1, bos_token_id=49406, eos_token_id=49407, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.dropout = dropout
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_position_embeddings = max_position_embeddings
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
|
class GroupViTTextConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`GroupViTTextModel`]. It is used to instantiate a
GroupViT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the GroupViT
[nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size of the GroupViT text model. Defines the number of different tokens that can be represented
by the `inputs_ids` passed when calling [`GroupViTModel`].
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 1024):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 77):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import GroupViTTextConfig, GroupViTTextModel
>>> # Initializing a GroupViTTextModel with nvidia/groupvit-gcc-yfcc style configuration
>>> configuration = GroupViTTextConfig()
>>> model = GroupViTTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=49408, hidden_size=256, intermediate_size=1024, num_hidden_layers=12, num_attention_heads=4, max_position_embeddings=77, hidden_act='quick_gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=1, bos_token_id=49406, eos_token_id=49407, **kwargs):
pass
| 2
| 1
| 33
| 1
| 32
| 0
| 1
| 1.29
| 1
| 1
| 0
| 0
| 1
| 12
| 1
| 1
| 90
| 10
| 35
| 34
| 15
| 45
| 17
| 16
| 15
| 1
| 1
| 0
| 1
|
2,917
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/configuration_groupvit.py
|
transformers.models.groupvit.configuration_groupvit.GroupViTVisionConfig
|
from ...configuration_utils import PretrainedConfig
class GroupViTVisionConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`GroupViTVisionModel`]. It is used to instantiate
a GroupViT model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the GroupViT
[nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 384):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 1536):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
depths (`list[int]`, *optional*, defaults to [6, 3, 3]):
The number of layers in each encoder block.
num_group_tokens (`list[int]`, *optional*, defaults to [64, 8, 0]):
The number of group tokens for each stage.
num_output_groups (`list[int]`, *optional*, defaults to [64, 8, 8]):
The number of output groups for each stage, 0 means no group.
num_attention_heads (`int`, *optional*, defaults to 6):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import GroupViTVisionConfig, GroupViTVisionModel
>>> # Initializing a GroupViTVisionModel with nvidia/groupvit-gcc-yfcc style configuration
>>> configuration = GroupViTVisionConfig()
>>> model = GroupViTVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'groupvit_vision_model'
base_config_key = 'vision_config'
def __init__(self, hidden_size=384, intermediate_size=1536, depths=[6, 3, 3], num_hidden_layers=12, num_group_tokens=[64, 8, 0], num_output_groups=[64, 8, 8], num_attention_heads=6, image_size=224, patch_size=16, num_channels=3, hidden_act='gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, assign_eps=1.0, assign_mlp_ratio=[0.5, 4], **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.depths = depths
if num_hidden_layers != sum(depths):
logger.warning(f'Manually setting num_hidden_layers to {num_hidden_layers}, but we expect num_hidden_layers = sum(depth) = {sum(depths)}')
self.num_hidden_layers = num_hidden_layers
self.num_group_tokens = num_group_tokens
self.num_output_groups = num_output_groups
self.num_attention_heads = num_attention_heads
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.assign_eps = assign_eps
self.assign_mlp_ratio = assign_mlp_ratio
|
class GroupViTVisionConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`GroupViTVisionModel`]. It is used to instantiate
a GroupViT model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the GroupViT
[nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 384):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 1536):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
depths (`list[int]`, *optional*, defaults to [6, 3, 3]):
The number of layers in each encoder block.
num_group_tokens (`list[int]`, *optional*, defaults to [64, 8, 0]):
The number of group tokens for each stage.
num_output_groups (`list[int]`, *optional*, defaults to [64, 8, 8]):
The number of output groups for each stage, 0 means no group.
num_attention_heads (`int`, *optional*, defaults to 6):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import GroupViTVisionConfig, GroupViTVisionModel
>>> # Initializing a GroupViTVisionModel with nvidia/groupvit-gcc-yfcc style configuration
>>> configuration = GroupViTVisionConfig()
>>> model = GroupViTVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size=384, intermediate_size=1536, depths=[6, 3, 3], num_hidden_layers=12, num_group_tokens=[64, 8, 0], num_output_groups=[64, 8, 8], num_attention_heads=6, image_size=224, patch_size=16, num_channels=3, hidden_act='gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, assign_eps=1.0, assign_mlp_ratio=[0.5, 4], **kwargs):
pass
| 2
| 1
| 47
| 1
| 46
| 0
| 2
| 0.96
| 1
| 1
| 0
| 0
| 1
| 18
| 1
| 1
| 106
| 10
| 49
| 43
| 26
| 47
| 25
| 22
| 23
| 2
| 1
| 1
| 2
|
2,918
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTAssignAttention
|
from torch import nn
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
class GroupViTAssignAttention(nn.Module):
def __init__(self, config: GroupViTVisionConfig):
super().__init__()
self.scale = config.hidden_size ** (-0.5)
self.q_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.k_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.v_proj = nn.Linear(config.hidden_size, config.hidden_size)
self.proj = nn.Linear(config.hidden_size, config.hidden_size)
self.assign_eps = config.assign_eps
def get_attn(self, attn, gumbel=True, hard=True):
if gumbel and self.training:
attn = gumbel_softmax(attn, dim=-2, hard=hard)
elif hard:
attn = hard_softmax(attn, dim=-2)
else:
attn = nn.functional.softmax(attn, dim=-2)
return attn
def forward(self, query, key):
value = key
query = self.q_proj(query)
key = self.k_proj(key)
value = self.v_proj(value)
raw_attn = query @ key.transpose(-2, -1) * self.scale
attn = self.get_attn(raw_attn)
soft_attn = self.get_attn(raw_attn, gumbel=False, hard=False)
attn = attn / (attn.sum(dim=-1, keepdim=True) + self.assign_eps)
out = attn @ value
out = self.proj(out)
return (out, soft_attn)
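`get_attn` relies on `hard_softmax` / `gumbel_softmax` helpers defined elsewhere in the file. The sketch below shows the usual straight-through trick such a `hard_softmax` uses, so the hard assignment is one-hot in the forward pass but carries softmax gradients in the backward pass; it is an illustrative re-implementation, not necessarily the file's exact helper.
import torch
def hard_softmax_sketch(logits: torch.Tensor, dim: int) -> torch.Tensor:
    # Straight-through: one-hot argmax forward, softmax gradient backward.
    y_soft = logits.softmax(dim)
    index = y_soft.max(dim, keepdim=True)[1]
    y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0)
    return y_hard - y_soft.detach() + y_soft
attn = torch.randn(2, 8, 16, requires_grad=True)  # (batch, groups, tokens)
hard = hard_softmax_sketch(attn, dim=-2)
print(hard.sum(dim=-2)[0, :3])  # each token is assigned to exactly one group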
|
class GroupViTAssignAttention(nn.Module):
def __init__(self, config: GroupViTVisionConfig):
pass
def get_attn(self, attn, gumbel=True, hard=True):
pass
def forward(self, query, key):
pass
| 4
| 0
| 14
| 3
| 10
| 1
| 2
| 0.13
| 1
| 2
| 1
| 0
| 3
| 6
| 3
| 13
| 46
| 12
| 30
| 15
| 26
| 4
| 28
| 15
| 24
| 3
| 1
| 2
| 5
|
2,919
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTAttention
|
import torch
from torch import nn
from typing import Any, Optional, Union
class GroupViTAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale = self.head_dim ** (-0.5)
self.dropout = config.attention_dropout
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, embed_dim = hidden_states.size()
is_cross_attention = encoder_hidden_states is not None
query_states = self.q_proj(hidden_states) * self.scale
if is_cross_attention:
key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz)
else:
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}')
if causal_attention_mask is not None:
if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}')
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}')
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}')
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights_reshaped)
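A minimal shape check for the attention module above, using a throwaway config object; the field values are arbitrary and the real model passes a GroupViT text or vision config instead.
from types import SimpleNamespace
import torch
config = SimpleNamespace(hidden_size=64, num_attention_heads=4, attention_dropout=0.0)
attn = GroupViTAttention(config)
hidden_states = torch.randn(2, 10, 64)
attn_output, attn_weights = attn(hidden_states, output_attentions=True)
print(attn_output.shape)   # (2, 10, 64)
print(attn_weights.shape)  # (2, 4, 10, 10)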
|
class GroupViTAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 4
| 2
| 34
| 5
| 27
| 2
| 4
| 0.1
| 1
| 5
| 0
| 0
| 3
| 10
| 3
| 13
| 108
| 19
| 81
| 32
| 70
| 8
| 58
| 25
| 54
| 9
| 1
| 2
| 12
|
2,920
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTCrossAttentionLayer
|
from torch import nn
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
class GroupViTCrossAttentionLayer(nn.Module):
def __init__(self, config: GroupViTVisionConfig):
super().__init__()
self.attn = GroupViTAttention(config)
self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = GroupViTMLP(config)
self.norm_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, query, key):
x = query
x = x + self.attn(query, encoder_hidden_states=key)[0]
x = x + self.mlp(self.norm2(x))
x = self.norm_post(x)
return x
|
class GroupViTCrossAttentionLayer(nn.Module):
def __init__(self, config: GroupViTVisionConfig):
pass
def forward(self, query, key):
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 4
| 3
| 0
| 2
| 4
| 2
| 12
| 14
| 1
| 13
| 8
| 10
| 0
| 13
| 8
| 10
| 1
| 1
| 0
| 2
|
2,921
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTEncoderLayer
|
import torch
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
from torch import nn
from typing import Any, Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
class GroupViTEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: GroupViTConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = GroupViTAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = GroupViTMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
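A shape sketch for the pre-LayerNorm encoder layer above, again with a hypothetical minimal config standing in for the real GroupViT config.
from types import SimpleNamespace
import torch
config = SimpleNamespace(
    hidden_size=64, num_attention_heads=4, attention_dropout=0.0,
    hidden_act="gelu", intermediate_size=128, layer_norm_eps=1e-5,
)
layer = GroupViTEncoderLayer(config)
hidden_states = torch.randn(2, 10, 64)
(out,) = layer(hidden_states, attention_mask=None, causal_attention_mask=None)
print(out.shape)  # (2, 10, 64): residual structure preserves the input shape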
|
class GroupViTEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: GroupViTConfig):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 23
| 3
| 16
| 5
| 2
| 0.31
| 1
| 6
| 3
| 0
| 2
| 5
| 2
| 12
| 48
| 6
| 32
| 17
| 23
| 10
| 21
| 11
| 18
| 2
| 1
| 1
| 3
|
2,922
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTMLP
|
from torch import nn
from typing import Any, Optional, Union
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
from ...activations import ACT2FN
import torch
class GroupViTMLP(nn.Module):
def __init__(self, config: GroupViTVisionConfig, hidden_size: Optional[int]=None, intermediate_size: Optional[int]=None, output_size: Optional[int]=None):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
hidden_size = hidden_size if hidden_size is not None else config.hidden_size
intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
output_size = output_size if output_size is not None else hidden_size
self.fc1 = nn.Linear(hidden_size, intermediate_size)
self.fc2 = nn.Linear(intermediate_size, output_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
class GroupViTMLP(nn.Module):
def __init__(self, config: GroupViTVisionConfig, hidden_size: Optional[int]=None, intermediate_size: Optional[int]=None, output_size: Optional[int]=None):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 10
| 0
| 10
| 0
| 3
| 0
| 1
| 4
| 1
| 1
| 2
| 4
| 2
| 12
| 22
| 1
| 21
| 13
| 12
| 0
| 15
| 7
| 12
| 4
| 1
| 0
| 5
|
2,923
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTMixerMLP
|
class GroupViTMixerMLP(GroupViTMLP):
def forward(self, x):
x = super().forward(x.transpose(1, 2))
return x.transpose(1, 2)
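The transpose means the MLP mixes information across the token/group dimension rather than the channel dimension (MLP-Mixer style). A small shape sketch, using a hypothetical minimal config providing the fields GroupViTMLP reads:
from types import SimpleNamespace
import torch
config = SimpleNamespace(hidden_act="gelu", hidden_size=64, intermediate_size=128)
# Mix across the 16 tokens: the fc layers treat the token axis as their feature axis.
mixer = GroupViTMixerMLP(config, hidden_size=16, intermediate_size=32, output_size=8)
x = torch.randn(2, 16, 64)  # (batch, tokens, channels)
print(mixer(x).shape)       # (2, 8, 64): token dimension 16 -> 8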
|
class GroupViTMixerMLP(GroupViTMLP):
def forward(self, x):
pass
| 2
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 13
| 4
| 0
| 4
| 2
| 2
| 0
| 4
| 2
| 2
| 1
| 2
| 0
| 1
|
2,924
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTModel
|
from typing import Any, Optional, Union
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
import torch
@auto_docstring
class GroupViTModel(GroupViTPreTrainedModel):
config: GroupViTConfig
def __init__(self, config: GroupViTConfig):
super().__init__(config)
if not isinstance(config.text_config, GroupViTTextConfig):
raise TypeError(f'config.text_config is expected to be of type GroupViTTextConfig but is of type {type(config.text_config)}.')
if not isinstance(config.vision_config, GroupViTVisionConfig):
raise TypeError(f'config.vision_config is expected to be of type GroupViTVisionConfig but is of type {type(config.vision_config)}.')
text_config = config.text_config
vision_config = config.vision_config
self.projection_dim = config.projection_dim
self.projection_intermediate_dim = config.projection_intermediate_dim
self.text_embed_dim = text_config.hidden_size
self.vision_embed_dim = vision_config.hidden_size
self.text_model = GroupViTTextTransformer(text_config)
self.vision_model = GroupViTVisionTransformer(vision_config)
self.visual_projection = nn.Sequential(nn.Linear(self.vision_embed_dim, self.projection_intermediate_dim, bias=True), nn.BatchNorm1d(self.projection_intermediate_dim), nn.ReLU(inplace=True), nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True))
self.text_projection = nn.Sequential(nn.Linear(self.text_embed_dim, self.projection_intermediate_dim, bias=True), nn.BatchNorm1d(self.projection_intermediate_dim), nn.ReLU(inplace=True), nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True))
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
self.post_init()
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor:
"""
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
applying the projection layer to the pooled output of [`GroupViTTextModel`].
Examples:
```python
>>> import torch
>>> from transformers import CLIPTokenizer, GroupViTModel
>>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```"""
text_outputs: BaseModelOutputWithPooling = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
text_features = self.text_projection(text_outputs.pooler_output)
return text_features
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(self, pixel_values: torch.Tensor) -> torch.FloatTensor:
"""
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
applying the projection layer to the pooled output of [`GroupViTVisionModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, GroupViTModel
>>> from transformers.image_utils import load_image
>>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_features = model.get_image_features(**inputs)
```"""
vision_outputs: BaseModelOutputWithPooling = self.vision_model(pixel_values)
image_features = self.visual_projection(vision_outputs.pooler_output)
return image_features
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_segmentation: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, GroupViTModelOutput]:
"""
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
output_segmentation (`bool`, *optional*):
Whether or not to return the segmentation logits.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, GroupViTModel
>>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(
... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
... )
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_segmentation = output_segmentation if output_segmentation is not None else self.config.output_segmentation
if output_segmentation:
output_attentions = True
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
image_embeds = vision_outputs[1]
image_embeds = self.visual_projection(image_embeds)
text_embeds = text_outputs[1]
text_embeds = self.text_projection(text_embeds)
image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
logit_scale = self.logit_scale.exp()
logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
logits_per_image = logits_per_text.t()
seg_logits = None
if output_segmentation:
image_group_embeds = vision_outputs[0]
image_group_embeds = self.visual_projection(image_group_embeds.reshape(-1, image_group_embeds.shape[-1]))
if output_hidden_states:
attentions = vision_outputs[3]
else:
attentions = vision_outputs[2]
grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:])
image_group_embeds = image_group_embeds / image_group_embeds.norm(dim=-1, keepdim=True)
logits_per_image_group = torch.matmul(image_group_embeds, text_embeds.t()) * logit_scale
logits_per_image_group = logits_per_image_group.reshape(image_embeds.shape[0], -1, text_embeds.shape[0]).permute(0, 2, 1)
flatten_grouping = grouping.reshape(grouping.shape[0], grouping.shape[1], -1)
seg_logits = torch.matmul(logits_per_image_group, flatten_grouping) * logit_scale
seg_logits = seg_logits.reshape(seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3])
loss = None
if return_loss:
loss = groupvit_loss(logits_per_text)
if not return_dict:
if seg_logits is not None:
output = (logits_per_image, logits_per_text, seg_logits, text_embeds, image_embeds, text_outputs, vision_outputs)
else:
output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
return (loss,) + output if loss is not None else output
return GroupViTModelOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, segmentation_logits=seg_logits, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)
|
@auto_docstring
class GroupViTModel(GroupViTPreTrainedModel):
def __init__(self, config: GroupViTConfig):
pass
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor:
'''
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
applying the projection layer to the pooled output of [`GroupViTTextModel`].
Examples:
```python
>>> import torch
>>> from transformers import CLIPTokenizer, GroupViTModel
>>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```'''
pass
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(self, pixel_values: torch.Tensor) -> torch.FloatTensor:
'''
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
applying the projection layer to the pooled output of [`GroupViTVisionModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, GroupViTModel
>>> from transformers.image_utils import load_image
>>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_features = model.get_image_features(**inputs)
```'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_segmentation: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, GroupViTModelOutput]:
'''
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
output_segmentation (`bool`, *optional*):
Whether or not to return the segmentation logits.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, GroupViTModel
>>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(
... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
... )
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```'''
pass
| 11
| 3
| 69
| 11
| 43
| 16
| 6
| 0.35
| 1
| 11
| 6
| 0
| 4
| 9
| 4
| 5
| 284
| 46
| 177
| 66
| 143
| 62
| 77
| 38
| 72
| 12
| 2
| 2
| 23
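Stripped of the segmentation branch, the contrastive head in the `forward` above reduces to three steps: L2-normalize both embedding sets, scale by `exp(logit_scale)`, and take text-image dot products. A minimal sketch of that score computation on random tensors; the shapes and the CLIP-style init value 2.6592 are assumptions for illustration:

```python
import torch

text_embeds = torch.randn(2, 256)          # (num_texts, projection_dim)
image_embeds = torch.randn(1, 256)         # (num_images, projection_dim)
logit_scale = torch.tensor(2.6592).exp()   # assumed logit_scale_init_value

text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)

logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
logits_per_image = logits_per_text.t()
print(logits_per_image.softmax(dim=1))     # per-image label probabilities
```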
|
2,925
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTModelOutput
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
import torch
from typing import Any, Optional, Union
from dataclasses import dataclass
@dataclass
@auto_docstring
class GroupViTModelOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
segmentation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
Classification scores for each pixel.
<Tip warning={true}>
The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
to avoid doing two interpolations and losing some quality when a user needs to resize the logits to the
original image size as post-processing. You should always check your logits shape and resize as needed.
</Tip>
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The text embeddings obtained by applying the projection layer to the pooled output of
[`GroupViTTextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The image embeddings obtained by applying the projection layer to the pooled output of
[`GroupViTVisionModel`].
text_model_output (`BaseModelOutputWithPooling`):
The output of the [`GroupViTTextModel`].
vision_model_output (`BaseModelOutputWithPooling`):
The output of the [`GroupViTVisionModel`].
"""
loss: Optional[torch.FloatTensor] = None
logits_per_image: Optional[torch.FloatTensor] = None
logits_per_text: Optional[torch.FloatTensor] = None
segmentation_logits: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPooling = None
def to_tuple(self) -> tuple[Any]:
return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))
|
@dataclass
@auto_docstring
class GroupViTModelOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
segmentation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
Classification scores for each pixel.
<Tip warning={true}>
The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
to avoid doing two interpolations and losing some quality when a user needs to resize the logits to the
original image size as post-processing. You should always check your logits shape and resize as needed.
</Tip>
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The text embeddings obtained by applying the projection layer to the pooled output of
[`GroupViTTextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The image embeddings obtained by applying the projection layer to the pooled output of
[`GroupViTVisionModel`].
text_model_output (`BaseModelOutputWithPooling`):
The output of the [`GroupViTTextModel`].
vision_model_output (`BaseModelOutputWithPooling`):
The output of the [`GroupViTVisionModel`].
'''
def to_tuple(self) -> tuple[Any]:
pass
| 4
| 1
| 5
| 0
| 5
| 0
| 2
| 2
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 1
| 48
| 6
| 14
| 10
| 12
| 28
| 11
| 10
| 9
| 2
| 1
| 0
| 2
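`to_tuple` above flattens the output dataclass but recursively converts the two nested `BaseModelOutputWithPooling` fields first, so callers that index the tuple never receive a `ModelOutput` object mid-stream. The same pattern in a dependency-free sketch, with plain dicts standing in for the output classes:

```python
# Plain-dict stand-ins for the nested model outputs (illustrative only).
inner = {"last_hidden_state": "lhs", "pooler_output": "pool"}
outer = {"loss": 0.5, "text_model_output": inner}


def to_tuple(output, nested_keys=("text_model_output", "vision_model_output")):
    # Convert nested outputs recursively, pass everything else through.
    return tuple(
        to_tuple(v) if k in nested_keys else v for k, v in output.items()
    )


print(to_tuple(outer))  # (0.5, ('lhs', 'pool'))
```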
|
2,926
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTPatchEmbeddings
|
from typing import Any, Optional, Union
import torch
from torch import nn
import collections.abc
class GroupViTPatchEmbeddings(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(self, image_size: int=224, patch_size: Union[int, tuple[int, int]]=16, num_channels: int=3, embed_dim: int=768):
super().__init__()
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
if not interpolate_pos_encoding:
if height != self.image_size[0] or width != self.image_size[1]:
raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).")
x = self.projection(pixel_values).flatten(2).transpose(1, 2)
return x
|
class GroupViTPatchEmbeddings(nn.Module):
'''
Image to Patch Embedding.
'''
def __init__(self, image_size: int=224, patch_size: Union[int, tuple[int, int]]=16, num_channels: int=3, embed_dim: int=768):
pass
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
pass
| 3
| 1
| 13
| 1
| 13
| 0
| 3
| 0.12
| 1
| 6
| 0
| 0
| 2
| 4
| 2
| 12
| 32
| 3
| 26
| 16
| 17
| 3
| 17
| 10
| 14
| 3
| 1
| 2
| 6
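The patch count above is simply `(height // patch_size) * (width // patch_size)`; with the 224-pixel default and 16-pixel patches that is 14 * 14 = 196 tokens. A quick sketch of the Conv2d-as-patchifier shape flow, using those default sizes:

```python
import torch
from torch import nn

projection = nn.Conv2d(3, 768, kernel_size=16, stride=16)  # channels -> embed_dim
pixel_values = torch.randn(1, 3, 224, 224)

x = projection(pixel_values)           # (1, 768, 14, 14): one column per patch
x = x.flatten(2).transpose(1, 2)       # (1, 196, 768): patches become tokens
print(x.shape)
```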
|
2,927
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTPreTrainedModel
|
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
from torch import nn
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
@auto_docstring
class GroupViTPreTrainedModel(PreTrainedModel):
config: GroupViTConfig
base_model_prefix = 'groupvit'
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""Initialize the weights"""
init_range = self.config.initializer_range
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=init_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
factor = self.config.initializer_factor
if isinstance(module, GroupViTTextEmbeddings):
module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
elif isinstance(module, GroupViTAttention):
factor = self.config.initializer_factor
in_proj_std = module.embed_dim ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor
out_proj_std = module.embed_dim ** (-0.5) * factor
nn.init.normal_(module.q_proj.weight, std=in_proj_std)
nn.init.normal_(module.k_proj.weight, std=in_proj_std)
nn.init.normal_(module.v_proj.weight, std=in_proj_std)
nn.init.normal_(module.out_proj.weight, std=out_proj_std)
elif isinstance(module, GroupViTMLP):
factor = self.config.initializer_factor
in_proj_std = module.config.hidden_size ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor
fc_std = (2 * module.config.hidden_size) ** (-0.5) * factor
nn.init.normal_(module.fc1.weight, std=fc_std)
nn.init.normal_(module.fc2.weight, std=in_proj_std)
|
@auto_docstring
class GroupViTPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 32
| 2
| 27
| 3
| 7
| 0.23
| 1
| 3
| 3
| 3
| 1
| 0
| 1
| 1
| 42
| 4
| 31
| 10
| 29
| 7
| 28
| 10
| 26
| 7
| 1
| 2
| 7
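The attention and MLP branches above use depth-scaled initialization standard deviations in the GPT-2/CLIP style, `embed_dim ** -0.5 * (2 * num_hidden_layers) ** -0.5 * factor`, so that residual contributions shrink as depth grows. Plugging in illustrative values (384-dim, 12 layers, factor 1.0):

```python
embed_dim, num_hidden_layers, factor = 384, 12, 1.0   # illustrative values

in_proj_std = embed_dim ** -0.5 * (2 * num_hidden_layers) ** -0.5 * factor
out_proj_std = embed_dim ** -0.5 * factor
print(f"{in_proj_std:.5f} {out_proj_std:.5f}")        # 0.01042 0.05103
```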
|
2,928
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTStage
|
from typing import Any, Optional, Union
from torch import nn
import torch
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
class GroupViTStage(nn.Module):
"""This corresponds to the `GroupingLayer` class in the GroupViT implementation."""
def __init__(self, config: GroupViTVisionConfig, depth: int, num_prev_group_token: int, num_group_token: int, num_output_group: int):
super().__init__()
self.depth = depth
self.num_group_token = num_group_token
if num_group_token > 0:
self.group_token = nn.Parameter(torch.zeros(1, num_group_token, config.hidden_size))
else:
self.group_token = None
self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(depth)])
if num_group_token > 0:
self.downsample = GroupViTTokenAssign(config=config, num_group_token=num_group_token, num_output_group=num_output_group)
else:
self.downsample = None
if num_prev_group_token > 0 and num_group_token > 0:
self.group_projector = nn.Sequential(nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps), GroupViTMixerMLP(config, num_prev_group_token, config.hidden_size // 2, num_group_token))
else:
self.group_projector = None
@property
def with_group_token(self):
return self.group_token is not None
def split_x(self, x):
if self.with_group_token:
return (x[:, :-self.num_group_token], x[:, -self.num_group_token:])
else:
return (x, None)
def concat_x(self, x: torch.Tensor, group_token: Optional[torch.Tensor]=None) -> torch.Tensor:
if group_token is None:
return x
return torch.cat([x, group_token], dim=1)
def forward(self, hidden_states: torch.Tensor, prev_group_token: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the grouping tensors of the Grouping block.
"""
if self.with_group_token:
group_token = self.group_token.expand(hidden_states.size(0), -1, -1)
if self.group_projector is not None:
group_token = group_token + self.group_projector(prev_group_token)
else:
group_token = None
x = hidden_states
cat_x = self.concat_x(x, group_token)
for layer in self.layers:
layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None)
cat_x = layer_out[0]
x, group_token = self.split_x(cat_x)
attention = None
if self.downsample is not None:
x, attention = self.downsample(x, group_token)
outputs = (x, group_token)
if output_attentions:
outputs = outputs + (attention,)
return outputs
|
class GroupViTStage(nn.Module):
'''This corresponds to the `GroupingLayer` class in the GroupViT implementation.'''
def __init__(self, config: GroupViTVisionConfig, depth: int, num_prev_group_token: int, num_group_token: int, num_output_group: int):
pass
@property
def with_group_token(self):
pass
def split_x(self, x):
pass
def concat_x(self, x: torch.Tensor, group_token: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
def forward(self, hidden_states: torch.Tensor, prev_group_token: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the grouping tensors of the Grouping block.
'''
pass
| 7
| 2
| 17
| 2
| 13
| 2
| 3
| 0.14
| 1
| 9
| 4
| 0
| 5
| 6
| 5
| 15
| 92
| 13
| 69
| 32
| 50
| 10
| 44
| 19
| 38
| 6
| 1
| 2
| 15
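The stage's group-token bookkeeping is plain sequence concatenation: learnable group tokens are appended after the image tokens, the joint sequence runs through the encoder layers, and negative indexing splits the two parts back out. A minimal sketch of the `concat_x`/`split_x` pair with assumed sizes:

```python
import torch

num_group_token = 64
image_tokens = torch.randn(2, 196, 384)                # (batch, patches, dim)
group_token = torch.zeros(1, num_group_token, 384).expand(2, -1, -1)

cat_x = torch.cat([image_tokens, group_token], dim=1)  # (2, 260, 384)
x, groups = cat_x[:, :-num_group_token], cat_x[:, -num_group_token:]
print(x.shape, groups.shape)  # torch.Size([2, 196, 384]) torch.Size([2, 64, 384])
```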
|
2,929
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTTextEmbeddings
|
from typing import Any, Optional, Union
from torch import nn
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
import torch
class GroupViTTextEmbeddings(nn.Module):
def __init__(self, config: GroupViTTextConfig):
super().__init__()
embed_dim = config.hidden_size
self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor:
seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
max_position_embedding = self.position_embedding.weight.shape[0]
if seq_length > max_position_embedding:
raise ValueError(f'Sequence length must be less than max_position_embeddings (got `sequence length`: {seq_length} and max_position_embeddings: {max_position_embedding})')
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if inputs_embeds is None:
inputs_embeds = self.token_embedding(input_ids)
position_embeddings = self.position_embedding(position_ids)
embeddings = inputs_embeds + position_embeddings
return embeddings
|
class GroupViTTextEmbeddings(nn.Module):
def __init__(self, config: GroupViTTextConfig):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor:
pass
| 3
| 0
| 18
| 4
| 14
| 1
| 3
| 0.03
| 1
| 4
| 1
| 0
| 2
| 2
| 2
| 12
| 38
| 8
| 29
| 15
| 21
| 1
| 19
| 10
| 16
| 5
| 1
| 1
| 6
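The embedding layer above is a sum of a token lookup and a learned position lookup, with `position_ids` pre-registered as a `(1, max_position_embeddings)` buffer and sliced to the sequence length at call time. A standalone sketch with assumed sizes:

```python
import torch
from torch import nn

vocab_size, max_pos, dim = 49408, 77, 512          # assumed sizes
token_embedding = nn.Embedding(vocab_size, dim)
position_embedding = nn.Embedding(max_pos, dim)
position_ids = torch.arange(max_pos).expand((1, -1))

input_ids = torch.randint(0, vocab_size, (2, 10))
embeddings = token_embedding(input_ids) + position_embedding(position_ids[:, :10])
print(embeddings.shape)                            # torch.Size([2, 10, 512])
```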
|
2,930
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTTextEncoder
|
from typing import Any, Optional, Union
import torch
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from torch import nn
class GroupViTTextEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
[`GroupViTEncoderLayer`].
Args:
config: GroupViTTextConfig
"""
def __init__(self, config: GroupViTTextConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
class GroupViTTextEncoder(nn.Module):
'''
Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
[`GroupViTEncoderLayer`].
Args:
config: GroupViTTextConfig
'''
def __init__(self, config: GroupViTTextConfig):
pass
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
'''
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 3
| 2
| 43
| 5
| 25
| 13
| 7
| 0.61
| 1
| 9
| 3
| 0
| 2
| 3
| 2
| 12
| 95
| 13
| 51
| 19
| 40
| 31
| 27
| 11
| 24
| 12
| 1
| 2
| 13
|
2,931
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTTextModel
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from torch import nn
import torch
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
from typing import Any, Optional, Union
class GroupViTTextModel(GroupViTPreTrainedModel):
config: GroupViTTextConfig
def __init__(self, config: GroupViTTextConfig):
super().__init__(config)
self.text_model = GroupViTTextTransformer(config)
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.text_model.embeddings.token_embedding
def set_input_embeddings(self, value):
self.text_model.embeddings.token_embedding = value
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
"""
Examples:
```python
>>> from transformers import CLIPTokenizer, GroupViTTextModel
>>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> model = GroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled (EOS token) states
```"""
return self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
|
class GroupViTTextModel(GroupViTPreTrainedModel):
def __init__(self, config: GroupViTTextConfig):
pass
def get_input_embeddings(self) -> nn.Module:
pass
def set_input_embeddings(self, value):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
'''
Examples:
```python
>>> from transformers import CLIPTokenizer, GroupViTTextModel
>>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> model = GroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled (EOS token) states
```'''
pass
| 6
| 1
| 11
| 1
| 6
| 3
| 1
| 0.45
| 1
| 6
| 3
| 0
| 4
| 1
| 4
| 5
| 51
| 9
| 29
| 16
| 14
| 13
| 12
| 7
| 7
| 1
| 2
| 0
| 4
|
2,932
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTTextTransformer
|
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from typing import Any, Optional, Union
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
from torch import nn
class GroupViTTextTransformer(nn.Module):
def __init__(self, config: GroupViTTextConfig):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = GroupViTTextEmbeddings(config)
self.encoder = GroupViTTextEncoder(config)
self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.eos_token_id = config.eos_token_id
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is None:
raise ValueError('You have to specify input_ids')
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
causal_attention_mask = _create_4d_causal_attention_mask(input_shape, hidden_states.dtype, device=hidden_states.device)
if attention_mask is not None:
attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
encoder_outputs = self.encoder(inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = encoder_outputs[0]
last_hidden_state = self.final_layer_norm(last_hidden_state)
if self.eos_token_id == 2:
pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1)]
else:
pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id).int().argmax(dim=-1)]
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
class GroupViTTextTransformer(nn.Module):
def __init__(self, config: GroupViTTextConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
pass
| 4
| 0
| 46
| 6
| 31
| 9
| 5
| 0.26
| 1
| 9
| 4
| 0
| 2
| 5
| 2
| 12
| 95
| 13
| 65
| 24
| 52
| 17
| 30
| 15
| 27
| 8
| 1
| 1
| 9
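Pooling above selects, for every sequence, the hidden state at the end-of-sequence position. The `eos_token_id == 2` fast path argmaxes the raw token ids, relying on the actual end-of-text token being the highest id in each sequence (the historical CLIP behaviour); the general branch instead argmaxes an equality mask to find the first EOS. A small sketch of the general branch with an assumed EOS id:

```python
import torch

eos_token_id = 7                                   # assumed id for the sketch
input_ids = torch.tensor([[5, 9, 7, 0],
                          [3, 7, 0, 0]])
last_hidden_state = torch.randn(2, 4, 16)

eos_positions = (input_ids == eos_token_id).int().argmax(dim=-1)  # first match
pooled = last_hidden_state[torch.arange(2), eos_positions]
print(eos_positions.tolist(), pooled.shape)        # [2, 1] torch.Size([2, 16])
```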
|
2,933
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTTokenAssign
|
from torch import nn
import collections.abc
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
class GroupViTTokenAssign(nn.Module):
def __init__(self, config: GroupViTVisionConfig, num_group_token, num_output_group):
super().__init__()
self.num_output_group = num_output_group
self.norm_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
assign_mlp_ratio = config.assign_mlp_ratio if isinstance(config.assign_mlp_ratio, collections.abc.Iterable) else (config.assign_mlp_ratio, config.assign_mlp_ratio)
tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio]
self.mlp_inter = GroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group)
self.norm_post_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.norm_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pre_assign_attn = GroupViTCrossAttentionLayer(config)
self.assign = GroupViTAssignAttention(config)
self.norm_new_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp_channels = GroupViTMLP(config, config.hidden_size, channels_dim, config.hidden_size)
def project_group_token(self, group_tokens):
"""
Args:
group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels]
Returns:
projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels]
"""
projected_group_tokens = self.mlp_inter(group_tokens)
projected_group_tokens = self.norm_post_tokens(projected_group_tokens)
return projected_group_tokens
def forward(self, image_tokens, group_tokens):
"""
Args:
image_tokens (`torch.Tensor`): image tokens, of shape [batch_size, input_length, channels]
group_tokens (`torch.Tensor`): group tokens, [batch_size, num_group_tokens, channels]
"""
group_tokens = self.norm_tokens(group_tokens)
image_tokens = self.norm_x(image_tokens)
projected_group_tokens = self.project_group_token(group_tokens)
projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens)
new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens)
new_image_tokens += projected_group_tokens
new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens))
return (new_image_tokens, attention)
|
class GroupViTTokenAssign(nn.Module):
def __init__(self, config: GroupViTVisionConfig, num_group_token, num_output_group):
pass
def project_group_token(self, group_tokens):
'''
Args:
group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels]
Returns:
projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels]
'''
pass
def forward(self, image_tokens, group_tokens):
'''
Args:
image_tokens (`torch.Tensor`): image tokens, of shape [batch_size, input_length, channels]
group_tokens (`torch.Tensor`): group tokens, [batch_size, num_group_tokens, channels]
'''
pass
| 4
| 2
| 17
| 2
| 10
| 5
| 1
| 0.48
| 1
| 8
| 5
| 0
| 3
| 9
| 3
| 13
| 53
| 7
| 31
| 18
| 27
| 15
| 27
| 18
| 23
| 2
| 1
| 0
| 4
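`GroupViTAssignAttention` is not reproduced in this record, but the shape contract of the block above is clear: `num_group_token` tokens are projected to `num_output_group` queries, which then softly absorb the image tokens. A generic soft-assignment sketch under that assumption; the real layer additionally uses hard/Gumbel assignment during training, so this is an illustration of the shape flow only:

```python
import torch

groups = torch.randn(2, 8, 384)                    # projected group tokens
image_tokens = torch.randn(2, 196, 384)

scores = torch.einsum("bgc,bnc->bgn", groups, image_tokens) / 384 ** 0.5
assign = scores.softmax(dim=1)                     # each token picks a group
weights = assign / (assign.sum(dim=-1, keepdim=True) + 1e-6)
new_image_tokens = torch.einsum("bgn,bnc->bgc", weights, image_tokens)
print(new_image_tokens.shape)                      # torch.Size([2, 8, 384])
```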
|
2,934
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTVisionEmbeddings
|
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
import torch
from torch import nn
class GroupViTVisionEmbeddings(nn.Module):
def __init__(self, config: GroupViTVisionConfig):
super().__init__()
self.patch_embeddings = GroupViTPatchEmbeddings(image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.hidden_size)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches, config.hidden_size))
self.dropout = nn.Dropout(config.dropout)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows interpolating the pre-trained position encodings so that the model can be used on higher-resolution
images. It is also adapted to support torch.jit tracing and models without class embeddings.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1]
num_positions = self.position_embeddings.shape[1]
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embeddings
patch_pos_embed = self.position_embeddings
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return patch_pos_embed
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
embeddings = self.layernorm(embeddings)
batch_size, seq_len, _ = embeddings.size()
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
|
class GroupViTVisionEmbeddings(nn.Module):
def __init__(self, config: GroupViTVisionConfig):
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows interpolating the pre-trained position encodings so that the model can be used on higher-resolution
images. It is also adapted to support torch.jit tracing and models without class embeddings.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
pass
| 4
| 1
| 23
| 5
| 15
| 3
| 2
| 0.2
| 1
| 6
| 2
| 0
| 3
| 6
| 3
| 13
| 72
| 17
| 46
| 21
| 42
| 9
| 35
| 21
| 31
| 2
| 1
| 1
| 5
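The interpolation path above reshapes the flat `(1, num_positions, dim)` table into its square `sqrt(N) x sqrt(N)` grid, bicubically resizes it to the new patch grid, and flattens back. A standalone sketch with assumed sizes: a 14 x 14 table resized for a 320 x 384 input with 16-pixel patches:

```python
import torch
from torch import nn

pos = torch.randn(1, 196, 384)                     # 14 x 14 positions, dim 384
new_height, new_width = 320 // 16, 384 // 16       # -> 20 x 24 patch grid

grid = pos.reshape(1, 14, 14, 384).permute(0, 3, 1, 2)
grid = nn.functional.interpolate(
    grid, size=(new_height, new_width), mode="bicubic", align_corners=False
)
pos_new = grid.permute(0, 2, 3, 1).view(1, -1, 384)
print(pos_new.shape)                               # torch.Size([1, 480, 384])
```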
|
2,935
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTVisionEncoder
|
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
from typing import Any, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
import torch
from torch import nn
class GroupViTVisionEncoder(nn.Module):
def __init__(self, config: GroupViTVisionConfig) -> None:
super().__init__()
self.config = config
self.stages = nn.ModuleList([GroupViTStage(config=config, depth=config.depths[i], num_group_token=config.num_group_tokens[i], num_output_group=config.num_output_groups[i], num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0) for i in range(len(config.depths))])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
all_hidden_states = () if output_hidden_states else None
all_groupings = () if output_attentions else None
group_tokens = None
for i, stage in enumerate(self.stages):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = stage(hidden_states, group_tokens, output_attentions)
hidden_states = layer_outputs[0]
group_tokens = layer_outputs[1]
if output_attentions and layer_outputs[2] is not None:
all_groupings = all_groupings + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings)
|
class GroupViTVisionEncoder(nn.Module):
def __init__(self, config: GroupViTVisionConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
pass
| 3
| 0
| 27
| 4
| 23
| 0
| 7
| 0
| 1
| 9
| 3
| 0
| 2
| 3
| 2
| 12
| 56
| 9
| 47
| 17
| 38
| 0
| 26
| 11
| 23
| 11
| 1
| 2
| 13
|
2,936
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTVisionModel
|
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
from typing import Any, Optional, Union
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
import torch
class GroupViTVisionModel(GroupViTPreTrainedModel):
config: GroupViTVisionConfig
main_input_name = 'pixel_values'
def __init__(self, config: GroupViTVisionConfig):
super().__init__(config)
self.vision_model = GroupViTVisionTransformer(config)
self.post_init()
def get_input_embeddings(self) -> GroupViTPatchEmbeddings:
return self.vision_model.embeddings.patch_embeddings
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
"""
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, GroupViTVisionModel
>>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> model = GroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled CLS states
```"""
return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
|
class GroupViTVisionModel(GroupViTPreTrainedModel):
def __init__(self, config: GroupViTVisionConfig):
pass
def get_input_embeddings(self) -> GroupViTPatchEmbeddings:
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
'''
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, GroupViTVisionModel
>>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> model = GroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled CLS states
```'''
pass
| 5
| 1
| 14
| 2
| 6
| 6
| 1
| 0.71
| 1
| 6
| 4
| 0
| 3
| 1
| 3
| 4
| 50
| 9
| 24
| 14
| 12
| 17
| 11
| 7
| 7
| 1
| 2
| 0
| 3
|
2,937
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/groupvit/modeling_groupvit.py
|
transformers.models.groupvit.modeling_groupvit.GroupViTVisionTransformer
|
import torch
from torch import nn
from typing import Any, Optional, Union
from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
class GroupViTVisionTransformer(nn.Module):
def __init__(self, config: GroupViTVisionConfig):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = GroupViTVisionEmbeddings(config)
self.encoder = GroupViTVisionEncoder(config)
self.layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
hidden_states = self.embeddings(pixel_values)
encoder_outputs = self.encoder(hidden_states=hidden_states, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict)
last_hidden_state = encoder_outputs[0]
last_hidden_state = self.layernorm(last_hidden_state)
pooled_output = last_hidden_state.mean(dim=1)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
class GroupViTVisionTransformer(nn.Module):
def __init__(self, config: GroupViTVisionConfig):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
pass
| 4
| 0
| 26
| 5
| 20
| 2
| 4
| 0.1
| 1
| 7
| 4
| 0
| 2
| 4
| 2
| 12
| 56
| 10
| 42
| 19
| 31
| 4
| 22
| 12
| 19
| 6
| 1
| 1
| 7
|
2,938
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/configuration_helium.py
|
transformers.models.helium.configuration_helium.HeliumConfig
|
from ...configuration_utils import PretrainedConfig
class HeliumConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`HeliumModel`]. It is used to instantiate a Helium
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Helium 2b model.
e.g. [kyutai/helium-2b](https://huggingface.co/kyutai/helium-2b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 48000):
Vocabulary size of the Helium model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`HeliumModel`]
hidden_size (`int`, *optional*, defaults to 2560):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 7040):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 20):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 20):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
head_dim (`int`, *optional*, defaults to 128):
The attention head dimension.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The legacy activation function. It is overwritten by the `hidden_activation`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-08):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 100000.0):
The base period of the RoPE embeddings.
pad_token_id (`int`, *optional*, defaults to 3):
Padding token id.
eos_token_id (`int` | `list`, *optional*, defaults to 2):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
```python
>>> from transformers import HeliumModel, HeliumConfig
>>> # Initializing a Helium 2b style configuration
>>> configuration = HeliumConfig()
>>> # Initializing a model from the Helium 2b style configuration
>>> model = HeliumModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'helium'
keys_to_ignore_at_inference = ['past_key_values']
base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.gate_proj': 'colwise', 'layers.*.mlp.up_proj': 'colwise', 'layers.*.mlp.down_proj': 'rowwise'}
base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}
def __init__(self, vocab_size=48000, hidden_size=2560, intermediate_size=7040, num_hidden_layers=24, num_attention_heads=20, num_key_value_heads=20, head_dim=128, hidden_act='silu', attention_dropout=0.0, max_position_embeddings=4096, initializer_range=0.02, rms_norm_eps=1e-08, use_cache=True, tie_word_embeddings=False, rope_theta=100000.0, pad_token_id=3, eos_token_id=2, bos_token_id=1, attention_bias=False, mlp_bias=False, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.head_dim = head_dim
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
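The grouped-query conversion described in the docstring (mean-pooling each group of key/value heads) is easy to misread, so here is a minimal PyTorch sketch of it. The head counts below are made-up illustration values rather than the config defaults, and this is not code from the file:

```python
import torch

# Hypothetical head counts for illustration (not the config defaults).
num_attention_heads, num_key_value_heads, head_dim, hidden_size = 20, 5, 128, 2560
group_size = num_attention_heads // num_key_value_heads  # heads pooled per group

# A multi-head k_proj weight of shape (num_attention_heads * head_dim, hidden_size).
k_proj_mha = torch.randn(num_attention_heads * head_dim, hidden_size)

# Mean-pool each group of `group_size` heads into a single GQA key head.
k_proj_gqa = (
    k_proj_mha.view(num_key_value_heads, group_size, head_dim, hidden_size)
    .mean(dim=1)
    .reshape(num_key_value_heads * head_dim, hidden_size)
)
print(k_proj_gqa.shape)  # torch.Size([640, 2560])
```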
|
class HeliumConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`HeliumModel`]. It is used to instantiate an Helium
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Helium 2b model.
e.g. [kyutai/helium-2b](https://huggingface.co/kyutai/helium-2b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 48000):
Vocabulary size of the Helium model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`HeliumModel`]
hidden_size (`int`, *optional*, defaults to 2560):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 7040):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 20):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 20):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
head_dim (`int`, *optional*, defaults to 128):
The attention head dimension.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The legacy activation function. It is overwritten by the `hidden_activation`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-08):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings.
rope_theta (`float`, *optional*, defaults to 100000.0):
The base period of the RoPE embeddings.
pad_token_id (`int`, *optional*, defaults to 3):
Padding token id.
eos_token_id (`int` | `list`, *optional*, defaults to 2):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
```python
>>> from transformers import HeliumModel, HeliumConfig
>>> # Initializing a Helium 2b style configuration
>>> configuration = HeliumConfig()
>>> # Initializing a model from the Helium 2b style configuration
>>> model = HeliumModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=48000, hidden_size=2560, intermediate_size=7040, num_hidden_layers=24, num_attention_heads=20, num_key_value_heads=20, head_dim=128, hidden_act='silu', attention_dropout=0.0, max_position_embeddings=4096, initializer_range=0.02, rms_norm_eps=1e-08, use_cache=True, tie_word_embeddings=False, rope_theta=100000.0, pad_token_id=3, eos_token_id=2, bos_token_id=1, attention_bias=False, mlp_bias=False, **kwargs):
pass
| 2 | 1 | 48 | 1 | 47 | 0 | 1 | 1.1 | 1 | 1 | 0 | 0 | 1 | 16 | 1 | 1 | 127 | 3 | 59 | 44 | 34 | 65 | 22 | 21 | 20 | 1 | 1 | 0 | 1
|
2,939
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modeling_helium.py
|
transformers.models.helium.modeling_helium.HeliumAttention
|
from .configuration_helium import HeliumConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from typing import Callable, Optional, Union
from ...utils.deprecation import deprecate_kwarg
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...cache_utils import Cache, DynamicCache
import math
import torch.nn as nn
from ...processing_utils import Unpack
import torch
class HeliumAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: HeliumConfig, layer_idx: Optional[int]=None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = 1 / math.sqrt(self.head_dim)
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
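`self.num_key_value_groups` records how many query heads share each key/value head; the actual expansion happens inside the attention function (`eager_attention_forward`, which is not part of this record). Assuming it follows the `repeat_kv` pattern used elsewhere in the library, the expansion amounts to:

```python
import torch

batch, num_kv_heads, seq_len, head_dim = 1, 5, 8, 16
num_key_value_groups = 4  # num_attention_heads // num_key_value_heads

key_states = torch.randn(batch, num_kv_heads, seq_len, head_dim)

# Repeat each KV head so that every group of query heads sees a copy of it.
expanded = (
    key_states[:, :, None, :, :]
    .expand(batch, num_kv_heads, num_key_value_groups, seq_len, head_dim)
    .reshape(batch, num_kv_heads * num_key_value_groups, seq_len, head_dim)
)
print(expanded.shape)  # torch.Size([1, 20, 8, 16])
```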
|
class HeliumAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: HeliumConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 4 | 1 | 34 | 4 | 30 | 1 | 3 | 0.03 | 1 | 6 | 3 | 0 | 2 | 11 | 2 | 12 | 72 | 9 | 61 | 31 | 50 | 2 | 34 | 23 | 31 | 5 | 1 | 2 | 6
|
2,940
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modeling_helium.py
|
transformers.models.helium.modeling_helium.HeliumDecoderLayer
|
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache
from typing import Callable, Optional, Union
import torch.nn as nn
from ...modeling_layers import GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
from .configuration_helium import HeliumConfig
from ...processing_utils import Unpack
import torch
class HeliumDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: HeliumConfig, layer_idx: Optional[int]=None):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = HeliumAttention(config=config, layer_idx=layer_idx)
self.mlp = HeliumMLP(config)
self.input_layernorm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
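The forward pass is the standard pre-norm residual pattern: normalize, run the sub-block, add the input back, twice. A stripped-down sketch of that control flow with stand-in modules (illustrative only; the real layer uses RMSNorm with attention and MLP sub-blocks):

```python
import torch
import torch.nn as nn

class PreNormBlock(nn.Module):
    # Illustrative stand-in: the real layer pairs RMSNorm with attention/MLP.
    def __init__(self, dim):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.inner = nn.Linear(dim, dim)

    def forward(self, x):
        return x + self.inner(self.norm(x))  # residual around norm + sub-block

block = PreNormBlock(16)
print(block(torch.randn(2, 4, 16)).shape)  # torch.Size([2, 4, 16])
```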
|
class HeliumDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: HeliumConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
| 4 | 0 | 25 | 4 | 21 | 2 | 2 | 0.07 | 1 | 10 | 6 | 0 | 2 | 5 | 2 | 12 | 52 | 8 | 42 | 22 | 28 | 3 | 21 | 11 | 18 | 2 | 1 | 1 | 3
|
2,941
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modeling_helium.py
|
transformers.models.helium.modeling_helium.HeliumForCausalLM
|
import torch
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...generation import GenerationMixin
from ...cache_utils import Cache, DynamicCache
import torch.nn as nn
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
@auto_docstring
class HeliumForCausalLM(HeliumPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
_tp_plan = {'lm_head': 'colwise_rep'}
_pp_plan = {'lm_head': (['hidden_states'], ['logits'])}
def __init__(self, config):
super().__init__(config)
self.model = HeliumModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
"""
Example:
```python
>>> from transformers import AutoTokenizer, HeliumForCausalLM
>>> model = HeliumForCausalLM.from_pretrained("google/helium-7b")
>>> tokenizer = AutoTokenizer.from_pretrained("google/helium-7b")
>>> prompt = "What is your favorite condiment?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"What is your favorite condiment?"
```"""
outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
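`logits_to_keep` avoids materializing logits for the full sequence: an `int` n keeps only the last n positions (0 keeps everything, since `slice(-0, None)` equals `slice(0, None)`), while a tensor selects explicit indices. A small sketch of the slicing with dummy hidden states:

```python
import torch

hidden_states = torch.randn(2, 10, 8)  # (batch, seq_len, hidden)

logits_to_keep = 1  # generation typically needs only the last position
slice_indices = (
    slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
)
print(hidden_states[:, slice_indices, :].shape)  # torch.Size([2, 1, 8])
```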
|
@auto_docstring
class HeliumForCausalLM(HeliumPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
'''
Example:
```python
>>> from transformers import AutoTokenizer, HeliumForCausalLM
>>> model = HeliumForCausalLM.from_pretrained("google/helium-7b")
>>> tokenizer = AutoTokenizer.from_pretrained("google/helium-7b")
>>> prompt = "What is your favorite condiment?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"What is your favorite condiment?"
```'''
pass
| 6 | 1 | 14 | 2 | 9 | 4 | 2 | 0.38 | 2 | 10 | 5 | 0 | 8 | 3 | 8 | 9 | 123 | 21 | 74 | 36 | 47 | 28 | 36 | 20 | 27 | 8 | 2 | 1 | 15
|
2,942
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modeling_helium.py
|
transformers.models.helium.modeling_helium.HeliumForSequenceClassification
|
from ...modeling_layers import GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class HeliumForSequenceClassification(GenericForSequenceClassification, HeliumPreTrainedModel):
pass
|
class HeliumForSequenceClassification(GenericForSequenceClassification, HeliumPreTrainedModel):
pass
| 1 | 0 | 21 | 2 | 17 | 2 | 3 | 0.11 | 1 | 8 | 4 | 0 | 4 | 3 | 4 | 5 | 90 | 11 | 71 | 31 | 53 | 8 | 36 | 18 | 31 | 9 | 2 | 1 | 12
|
2,943
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modeling_helium.py
|
transformers.models.helium.modeling_helium.HeliumForTokenClassification
|
from ...modeling_layers import GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class HeliumForTokenClassification(GenericForTokenClassification, HeliumPreTrainedModel):
pass
|
class HeliumForTokenClassification(GenericForTokenClassification, HeliumPreTrainedModel):
pass
| 1 | 0 | 17 | 1 | 14 | 2 | 3 | 0.11 | 1 | 6 | 3 | 0 | 4 | 4 | 4 | 5 | 79 | 8 | 64 | 28 | 41 | 7 | 29 | 15 | 24 | 5 | 2 | 1 | 10
|
2,944
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modeling_helium.py
|
transformers.models.helium.modeling_helium.HeliumMLP
|
from ...activations import ACT2FN
import torch.nn as nn
class HeliumMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
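The forward pass is the gated (SwiGLU-style) feed-forward: the `gate_proj` branch goes through the activation and gates the `up_proj` branch elementwise before `down_proj` maps back to the hidden size. The same computation written out with functional ops and toy dimensions:

```python
import torch
import torch.nn.functional as F

hidden, intermediate = 4, 8
x = torch.randn(1, hidden)
gate = torch.randn(intermediate, hidden)  # stand-in for gate_proj.weight
up = torch.randn(intermediate, hidden)    # stand-in for up_proj.weight
down = torch.randn(hidden, intermediate)  # stand-in for down_proj.weight

# down_proj(silu(gate_proj(x)) * up_proj(x)), with silu as the default act_fn
out = F.linear(F.silu(F.linear(x, gate)) * F.linear(x, up), down)
print(out.shape)  # torch.Size([1, 4])
```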
|
class HeliumMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 7 | 2 | 12 | 14 | 1 | 13 | 11 | 10 | 0 | 13 | 11 | 10 | 1 | 1 | 0 | 2
|
2,945
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modeling_helium.py
|
transformers.models.helium.modeling_helium.HeliumModel
|
import torch.nn as nn
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...utils.generic import check_model_inputs
from ...masking_utils import create_causal_mask
import torch
from .configuration_helium import HeliumConfig
from ...cache_utils import Cache, DynamicCache
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from typing import Callable, Optional, Union
@auto_docstring
class HeliumModel(HeliumPreTrainedModel):
def __init__(self, config: HeliumConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([HeliumDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = HeliumRotaryEmbedding(config)
self.gradient_checkpointing = False
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
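`cache_position` marks where the current tokens sit in the full cached-plus-new sequence, which is what lets incremental decoding reuse the KV cache. A sketch of the arithmetic with made-up numbers:

```python
import torch

past_seen_tokens = 6  # tokens already held in the KV cache
new_tokens = 2        # inputs_embeds.shape[1] for this forward pass

cache_position = torch.arange(past_seen_tokens, past_seen_tokens + new_tokens)
position_ids = cache_position.unsqueeze(0)  # add the batch dimension
print(cache_position.tolist(), position_ids.shape)  # [6, 7] torch.Size([1, 2])
```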
|
@auto_docstring
class HeliumModel(HeliumPreTrainedModel):
def __init__(self, config: HeliumConfig):
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
pass
| 6 | 0 | 40 | 5 | 30 | 6 | 6 | 0.22 | 1 | 16 | 10 | 0 | 5 | 7 | 6 | 7 | 257 | 34 | 184 | 65 | 146 | 40 | 89 | 34 | 82 | 21 | 2 | 2 | 37
|
2,946
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modeling_helium.py
|
transformers.models.helium.modeling_helium.HeliumPreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_helium import HeliumConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
@auto_docstring
class HeliumPreTrainedModel(PreTrainedModel):
config: HeliumConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['HeliumDecoderLayer']
_skip_keys_device_placement = ['past_key_values']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': HeliumDecoderLayer, 'attentions': HeliumAttention}
|
@auto_docstring
class HeliumPreTrainedModel(PreTrainedModel):
pass
| 2 | 0 | 10 | 0 | 10 | 0 | 5 | 0 | 1 | 0 | 0 | 4 | 1 | 0 | 1 | 1 | 24 | 1 | 23 | 15 | 21 | 0 | 22 | 15 | 20 | 5 | 1 | 2 | 5
|
2,947
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modeling_helium.py
|
transformers.models.helium.modeling_helium.HeliumRotaryEmbedding
|
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
import torch
from .configuration_helium import HeliumConfig
import torch.nn as nn
class HeliumRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: HeliumConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
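This module only builds the `cos`/`sin` tables; applying them to queries and keys happens in `apply_rotary_pos_emb`, which is not part of this record. Assuming it follows the rotate-half formulation used across the library, the application step looks roughly like this:

```python
import torch

def rotate_half(x):
    # Swap and negate the two halves of the last dimension.
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

head_dim, seq_len = 8, 4
q = torch.randn(1, 2, seq_len, head_dim)          # (batch, heads, seq, head_dim)
cos = torch.randn(1, seq_len, head_dim)[:, None]  # broadcast over the head axis
sin = torch.randn(1, seq_len, head_dim)[:, None]

q_embed = (q * cos) + (rotate_half(q) * sin)      # rotary position encoding
print(q_embed.shape)  # torch.Size([1, 2, 4, 8])
```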
|
class HeliumRotaryEmbedding(nn.Module):
def __init__(self, config: HeliumConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5 | 0 | 18 | 2 | 13 | 5 | 3 | 0.35 | 1 | 4 | 1 | 0 | 3 | 7 | 3 | 13 | 59 | 8 | 40 | 21 | 35 | 14 | 38 | 20 | 34 | 3 | 1 | 1 | 8
|
2,948
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modular_helium.py
|
transformers.models.helium.modular_helium.HeliumAttention
|
import torch.nn as nn
from typing import Optional
import math
from .configuration_helium import HeliumConfig
from ..granite.modeling_granite import GraniteAttention
class HeliumAttention(GraniteAttention):
def __init__(self, config: HeliumConfig, layer_idx: Optional[int]=None):
super().__init__(config, layer_idx)
self.o_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
self.scaling = 1 / math.sqrt(self.head_dim)
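The behavioral changes from the Granite parent here are the bias-free `o_proj` and the scaling factor: attention scores are scaled by `1 / sqrt(head_dim)` instead of Granite's `attention_multiplier`. Numerically, with the default head size:

```python
import math
import torch

head_dim = 128
scaling = 1 / math.sqrt(head_dim)  # ~0.088388

q = torch.randn(1, 4, head_dim)
k = torch.randn(1, 4, head_dim)
scores = (q @ k.transpose(-2, -1)) * scaling  # scaled dot-product scores
print(scores.shape)  # torch.Size([1, 4, 4])
```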
|
class HeliumAttention(GraniteAttention):
def __init__(self, config: HeliumConfig, layer_idx: Optional[int]=None):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 1 | 2 | 1 | 13 | 5 | 0 | 5 | 4 | 3 | 0 | 5 | 4 | 3 | 1 | 2 | 0 | 1
|
2,949
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modular_helium.py
|
transformers.models.helium.modular_helium.HeliumDecoderLayer
|
from .configuration_helium import HeliumConfig
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRotaryEmbedding
from typing import Optional
class HeliumDecoderLayer(LlamaDecoderLayer):
def __init__(self, config: HeliumConfig, layer_idx: Optional[int]=None):
super().__init__(config, layer_idx)
self.mlp = HeliumMLP(config)
self.input_layernorm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
class HeliumDecoderLayer(LlamaDecoderLayer):
def __init__(self, config: HeliumConfig, layer_idx: Optional[int]=None):
pass
| 2 | 0 | 6 | 1 | 5 | 0 | 1 | 0 | 1 | 5 | 3 | 0 | 1 | 3 | 1 | 13 | 7 | 1 | 6 | 5 | 4 | 0 | 6 | 5 | 4 | 1 | 2 | 0 | 1
|
2,950
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modular_helium.py
|
transformers.models.helium.modular_helium.HeliumForCausalLM
|
from ..gemma.modeling_gemma import GemmaForCausalLM, GemmaForSequenceClassification, GemmaForTokenClassification
class HeliumForCausalLM(GemmaForCausalLM):
pass
|
class HeliumForCausalLM(GemmaForCausalLM):
pass
| 1 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 3 | 2 | 0 | 1 | 1 | 1 | 10 | 5 | 0 | 5 | 3 | 3 | 0 | 5 | 3 | 3 | 1 | 3 | 0 | 1
|
2,951
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modular_helium.py
|
transformers.models.helium.modular_helium.HeliumForSequenceClassification
|
from ..gemma.modeling_gemma import GemmaForCausalLM, GemmaForSequenceClassification, GemmaForTokenClassification
class HeliumForSequenceClassification(GemmaForSequenceClassification):
pass
|
class HeliumForSequenceClassification(GemmaForSequenceClassification):
pass
| 1 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 3 | 2 | 0 | 1 | 1 | 1 | 6 | 5 | 0 | 5 | 3 | 3 | 0 | 5 | 3 | 3 | 1 | 3 | 0 | 1
|
2,952
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modular_helium.py
|
transformers.models.helium.modular_helium.HeliumForTokenClassification
|
from ..gemma.modeling_gemma import GemmaForCausalLM, GemmaForSequenceClassification, GemmaForTokenClassification
class HeliumForTokenClassification(GemmaForTokenClassification):
pass
|
class HeliumForTokenClassification(GemmaForTokenClassification):
pass
| 1 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 3 | 2 | 0 | 1 | 1 | 1 | 6 | 5 | 0 | 5 | 3 | 3 | 0 | 5 | 3 | 3 | 1 | 3 | 0 | 1
|
2,953
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modular_helium.py
|
transformers.models.helium.modular_helium.HeliumMLP
|
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRotaryEmbedding
class HeliumMLP(LlamaMLP):
pass
|
class HeliumMLP(LlamaMLP):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0
|
2,954
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modular_helium.py
|
transformers.models.helium.modular_helium.HeliumModel
|
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRotaryEmbedding
from .configuration_helium import HeliumConfig
import torch.nn as nn
class HeliumModel(HeliumPreTrainedModel, LlamaModel):
def __init__(self, config: HeliumConfig):
super().__init__(config)
self.layers = nn.ModuleList([HeliumDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = HeliumRotaryEmbedding(config)
self.gradient_checkpointing = False
self.post_init()
|
class HeliumModel(HeliumPreTrainedModel, LlamaModel):
def __init__(self, config: HeliumConfig):
pass
| 2 | 0 | 11 | 1 | 9 | 1 | 1 | 0.1 | 2 | 6 | 4 | 0 | 1 | 4 | 1 | 8 | 12 | 1 | 10 | 6 | 8 | 1 | 8 | 6 | 6 | 1 | 3 | 0 | 1
|
2,955
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modular_helium.py
|
transformers.models.helium.modular_helium.HeliumPreTrainedModel
|
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRotaryEmbedding
class HeliumPreTrainedModel(LlamaPreTrainedModel):
pass
|
class HeliumPreTrainedModel(LlamaPreTrainedModel):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0
|
2,956
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modular_helium.py
|
transformers.models.helium.modular_helium.HeliumRMSNorm
|
import torch.nn as nn
import torch
class HeliumRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return (self.weight.to(torch.float32) * hidden_states).to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
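RMSNorm rescales each feature vector by its root mean square, with no mean subtraction and no bias: `y = weight * x / sqrt(mean(x**2) + eps)`. A quick sketch checking that formula against the forward pass above (ignoring the float32 up-cast):

```python
import torch

x = torch.randn(2, 4, 16)
eps = 1e-06
weight = torch.ones(16)

# y = weight * x / sqrt(mean(x**2, dim=-1) + eps)
rms = torch.sqrt(x.pow(2).mean(-1, keepdim=True) + eps)
manual = weight * (x / rms)
print(manual.shape)  # torch.Size([2, 4, 16])
```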
|
class HeliumRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 4 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 3 | 2 | 3 | 13 | 15 | 2 | 13 | 8 | 9 | 0 | 13 | 8 | 9 | 1 | 1 | 0 | 3
|
2,957
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/helium/modular_helium.py
|
transformers.models.helium.modular_helium.HeliumRotaryEmbedding
|
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRotaryEmbedding
class HeliumRotaryEmbedding(LlamaRotaryEmbedding):
pass
|
class HeliumRotaryEmbedding(LlamaRotaryEmbedding):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0
|
2,958
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/herbert/tokenization_herbert.py
|
transformers.models.herbert.tokenization_herbert.HerbertTokenizer
|
import os
from typing import Optional
from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
import json
class HerbertTokenizer(PreTrainedTokenizer):
"""
Construct a BPE tokenizer for HerBERT.
Peculiarities:
- uses BERT's pre-tokenizer: BaseTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a
punctuation character will be treated separately.
- Such pretokenized input is BPE subtokenized
This tokenizer inherits from [`XLMTokenizer`] which contains most of the methods. Users should refer to the
superclass for more information regarding methods.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(self, vocab_file, merges_file, tokenizer_file=None, cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', sep_token='</s>', bos_token='<s>', do_lowercase_and_remove_accent=False, additional_special_tokens=['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>'], lang2id=None, id2lang=None, **kwargs):
try:
import sacremoses
except ImportError:
raise ImportError('You need to install sacremoses to use HerbertTokenizer. See https://pypi.org/project/sacremoses/ for installation.')
self.sm = sacremoses
self.cache_moses_punct_normalizer = {}
self.cache_moses_tokenizer = {}
self.lang_with_custom_tokenizer = {'zh', 'th', 'ja'}
self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
self.lang2id = lang2id
self.id2lang = id2lang
if lang2id is not None and id2lang is not None:
assert len(lang2id) == len(id2lang)
self.ja_word_tokenizer = None
self.zh_word_tokenizer = None
with open(vocab_file, encoding='utf-8') as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file, encoding='utf-8') as merges_handle:
merges = merges_handle.read().split('\n')[:-1]
merges = [tuple(merge.split()[:2]) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
super().__init__(unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, lang2id=lang2id, id2lang=id2lang, do_lowercase_and_remove_accent=do_lowercase_and_remove_accent, tokenizer_file=None, **kwargs)
self.bert_pre_tokenizer = BasicTokenizer(do_lower_case=False, never_split=self.all_special_tokens, tokenize_chinese_chars=False, strip_accents=False)
@property
def do_lower_case(self):
return self.do_lowercase_and_remove_accent
def moses_punct_norm(self, text, lang):
if lang not in self.cache_moses_punct_normalizer:
punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
self.cache_moses_punct_normalizer[lang] = punct_normalizer
else:
punct_normalizer = self.cache_moses_punct_normalizer[lang]
return punct_normalizer.normalize(text)
def moses_tokenize(self, text, lang):
if lang not in self.cache_moses_tokenizer:
moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
self.cache_moses_tokenizer[lang] = moses_tokenizer
else:
moses_tokenizer = self.cache_moses_tokenizer[lang]
return moses_tokenizer.tokenize(text, return_str=False, escape=False)
def moses_pipeline(self, text, lang):
text = replace_unicode_punct(text)
text = self.moses_punct_norm(text, lang)
text = remove_non_printing_char(text)
return text
def ja_tokenize(self, text):
if self.ja_word_tokenizer is None:
try:
import Mykytea
self.ja_word_tokenizer = Mykytea.Mykytea(f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin")
except (AttributeError, ImportError):
logger.error("Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper (https://github.com/chezou/Mykytea-python) with the following steps")
logger.error('1. git clone git@github.com:neubig/kytea.git && cd kytea')
logger.error('2. autoreconf -i')
logger.error('3. ./configure --prefix=$HOME/local')
logger.error('4. make && make install')
logger.error('5. pip install kytea')
raise
return list(self.ja_word_tokenizer.getWS(text))
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
word = tuple(token[:-1]) + (token[-1] + '</w>',)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word)
if not pairs:
return token + '</w>'
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
if word == '\n </w>':
word = '\n</w>'
self.cache[token] = word
return word
def _tokenize(self, text):
pre_tokens = self.bert_pre_tokenizer.tokenize(text)
split_tokens = []
for token in pre_tokens:
if token:
split_tokens.extend(list(self.bpe(token).split(' ')))
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = ''.join(tokens).replace('</w>', ' ').strip()
return out_string
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
bos = [self.bos_token_id]
sep = [self.sep_token_id]
if token_ids_1 is None:
return bos + token_ids_0 + sep
return bos + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is not None:
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
return [1] + [0] * len(token_ids_0) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
index = 0
with open(merge_file, 'w', encoding='utf-8') as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
index = token_index
writer.write(' '.join(bpe_tokens) + '\n')
index += 1
return (vocab_file, merge_file)
def __getstate__(self):
state = self.__dict__.copy()
state['sm'] = None
return state
def __setstate__(self, d):
self.__dict__ = d
try:
import sacremoses
except ImportError:
raise ImportError('You need to install sacremoses to use XLMTokenizer. See https://pypi.org/project/sacremoses/ for installation.')
self.sm = sacremoses
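The `bpe` method depends on a module-level `get_pairs` helper that this extraction does not include. Assuming it matches the standard XLM/GPT-2 implementation, it collects the set of adjacent symbol pairs in a word:

```python
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    `word` is a tuple of symbols (variable-length strings), as built by `bpe`.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(get_pairs(("l", "o", "w", "e", "r</w>")))
# e.g. {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r</w>')}
```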
|
class HerbertTokenizer(PreTrainedTokenizer):
'''
Construct a BPE tokenizer for HerBERT.
Peculiarities:
- uses BERT's pre-tokenizer: BaseTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of a
punctuation character will be treated separately.
- Such pretokenized input is BPE subtokenized
This tokenizer inherits from [`XLMTokenizer`] which contains most of the methods. Users should refer to the
superclass for more information regarding methods.
'''
def __init__(self, vocab_file, merges_file, tokenizer_file=None, cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', sep_token='</s>', bos_token='<s>', do_lowercase_and_remove_accent=False, additional_special_tokens=['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>'], lang2id=None, id2lang=None, **kwargs):
pass
@property
def do_lower_case(self):
pass
def moses_punct_norm(self, text, lang):
pass
def moses_tokenize(self, text, lang):
pass
def moses_pipeline(self, text, lang):
pass
def ja_tokenize(self, text):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def bpe(self, token):
pass
def _tokenize(self, text):
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) to an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) to a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) into a single string.'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
| 21 | 6 | 17 | 2 | 12 | 3 | 2 | 0.31 | 1 | 14 | 1 | 0 | 19 | 16 | 19 | 108 | 368 | 54 | 240 | 102 | 182 | 74 | 163 | 62 | 140 | 10 | 3 | 3 | 46
|
2,959
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/herbert/tokenization_herbert_fast.py
|
transformers.models.herbert.tokenization_herbert_fast.HerbertTokenizerFast
|
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_herbert import HerbertTokenizer
from typing import Optional
class HerbertTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library).
Peculiarities:
- uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
a punctuation character will be treated separately.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the methods. Users should refer to the
superclass for more information regarding methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = HerbertTokenizer
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', sep_token='</s>', **kwargs):
super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs)
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An HerBERT, like BERT sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
cls = [self.cls_token_id]
sep = [self.sep_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
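For a concrete picture of the `<s> A </s> B </s>` layout and its special-tokens mask, here is a toy walk-through with made-up token ids (no tokenizer required):

```python
cls_id, sep_id = 0, 2   # hypothetical ids for <s> and </s>
token_ids_0 = [11, 12]  # sequence A
token_ids_1 = [21]      # sequence B

inputs = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
mask = [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
print(inputs)  # [0, 11, 12, 2, 21, 2]
print(mask)    # [1, 0, 0, 1, 0, 1]
```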
|
class HerbertTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "Fast" BPE tokenizer for HerBERT (backed by HuggingFace's *tokenizers* library).
Peculiarities:
- uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence of
a punctuation character will be treated separately.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the methods. Users should refer to the
superclass for more information regarding methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
'''
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', sep_token='</s>', **kwargs):
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An HerBERT, like BERT sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 5 | 3 | 21 | 2 | 10 | 8 | 2 | 0.98 | 1 | 5 | 0 | 0 | 5 | 0 | 5 | 93 | 131 | 22 | 55 | 30 | 32 | 54 | 26 | 13 | 20 | 3 | 3 | 1 | 9
|
2,960
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/configuration_hiera.py
|
transformers.models.hiera.configuration_hiera.HieraConfig
|
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
from ...configuration_utils import PretrainedConfig
class HieraConfig(BackboneConfigMixin, PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`HieraModel`]. It is used to instantiate a Hiera
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Hiera
[facebook/hiera-base-224](https://huggingface.co/facebook/hiera-base-224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
image_size (`list(int)`, *optional*, defaults to `[224, 224]`):
The size (resolution) of input in the format (height, width) for images
and (frames, height, width) for videos.
patch_size (`list(int)`, *optional*, defaults to `[7, 7]`):
The size (resolution) of each patch.
patch_stride (`list(int)`, *optional*, defaults to `[4, 4]`):
The stride of the patch.
patch_padding (`list(int)`, *optional*, defaults to `[3, 3]`):
The padding of the patch.
mlp_ratio (`float`, *optional*, defaults to 4.0):
The ratio of mlp hidden dim to embedding dim.
depths (`list(int)`, *optional*, defaults to `[2, 3, 16, 3]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[1, 2, 4, 8]`):
Number of attention heads in each layer of the Transformer encoder.
embed_dim_multiplier (`float`, *optional*, defaults to 2.0):
The multiplier to the dimensionality of patch embedding in each layer of the Transformer encoder.
num_query_pool (`int`, *optional*, defaults to 3):
The number of query pool stages.
query_stride (`list(int)`, *optional*, defaults to `[2, 2]`):
The stride of the query pool.
masked_unit_size (`list(int)`, *optional*, defaults to `[8, 8]`):
The size of the masked unit.
masked_unit_attention (`list(bool)`, *optional*, defaults to `[True, True, False, False]`):
Whether to use masked unit attention in each layer of the Transformer encoder.
drop_path_rate (`float`, *optional*, defaults to 0.0):
The drop path rate.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices and
the zero_initializer for initializing all bias vectors.
layer_norm_init (`float`, *optional*, defaults to 1.0):
The initial weight value for layer normalization layers.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
decoder_hidden_size (`int`, *optional*):
Dimensionality of decoder embeddings for MAE pretraining.
decoder_depth (`int`, *optional*):
Depth of the decoder for MAE pretraining.
decoder_num_heads (`int`, *optional*):
Number of attention heads in each layer of the decoder for MAE pretraining.
normalize_pixel_loss (`bool`, *optional*, defaults to `True`):
Whether to normalize the pixel loss by the number of pixels.
mask_ratio (`float`, *optional*, defaults to 0.6):
The ratio of masked tokens in the input.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import HieraConfig, HieraModel
>>> # Initializing a Hiera hiera-base-patch16-224 style configuration
>>> configuration = HieraConfig()
>>> # Initializing a model (with random weights) from the hiera-base-patch16-224 style configuration
>>> model = HieraModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'hiera'
attribute_map = {'num_hidden_layers': 'num_layers'}
def __init__(self, embed_dim=96, image_size=[224, 224], patch_size=[7, 7], patch_stride=[4, 4], patch_padding=[3, 3], mlp_ratio=4.0, depths=[2, 3, 16, 3], num_heads=[1, 2, 4, 8], embed_dim_multiplier=2.0, num_query_pool=3, query_stride=[2, 2], masked_unit_size=[8, 8], masked_unit_attention=[True, True, False, False], drop_path_rate=0.0, num_channels=3, hidden_act='gelu', initializer_range=0.02, layer_norm_init=1.0, layer_norm_eps=1e-06, decoder_hidden_size=None, decoder_depth=None, decoder_num_heads=None, normalize_pixel_loss=True, mask_ratio=0.6, out_features=None, out_indices=None, **kwargs):
super().__init__(**kwargs)
if masked_unit_size[0] % query_stride[0] ** (len(depths) - 1) != 0:
raise ValueError(f'masked_unit_size[0] ({masked_unit_size[0]}) must be divisible by query_stride[0] ({query_stride[0]}) raised to the power of the number of layers ({len(depths) - 1})')
if num_query_pool >= len(depths):
raise ValueError(f'num_query_pool ({num_query_pool}) must be less than the number of layers ({len(depths)})')
self.embed_dim = embed_dim
self.image_size = image_size
self.patch_size = patch_size
self.patch_stride = patch_stride
self.patch_padding = patch_padding
self.mlp_ratio = mlp_ratio
self.depths = depths
self.num_heads = num_heads
self.num_layers = len(depths)
self.embed_dim_multiplier = embed_dim_multiplier
self.num_query_pool = num_query_pool
self.query_stride = query_stride
self.masked_unit_size = masked_unit_size
self.masked_unit_attention = masked_unit_attention
self.drop_path_rate = drop_path_rate
self.num_channels = num_channels
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_init = layer_norm_init
self.layer_norm_eps = layer_norm_eps
self.decoder_hidden_size = decoder_hidden_size
self.decoder_depth = decoder_depth
self.decoder_num_heads = decoder_num_heads
self.normalize_pixel_loss = normalize_pixel_loss
self.mask_ratio = mask_ratio
self.hidden_size = int(embed_dim * embed_dim_multiplier ** (len(depths) - 1))
self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
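As an aside, a minimal runnable sketch (not part of the record above; values are the documented defaults) of the divisibility check performed in `__init__` and of the derived `hidden_size`:
```python
# Illustrative only: reproduces the HieraConfig sanity check and the derived
# hidden_size using the documented default values.
masked_unit_size = [8, 8]
query_stride = [2, 2]
depths = [2, 3, 16, 3]
embed_dim = 96
embed_dim_multiplier = 2.0

# 8 % (2 ** 3) == 0, so the defaults pass the check
assert masked_unit_size[0] % query_stride[0] ** (len(depths) - 1) == 0

# hidden_size = 96 * 2.0 ** 3 = 768
print(int(embed_dim * embed_dim_multiplier ** (len(depths) - 1)))  # 768
```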
|
class HieraConfig(BackboneConfigMixin, PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`HieraModel`]. It is used to instantiate a Hiera
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Hiera
[facebook/hiera-base-224](https://huggingface.co/facebook/hiera-base-224) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
image_size (`list(int)`, *optional*, defaults to `[224, 224]`):
The size (resolution) of input in the format (height, width) for images
and (frames, height, width) for videos.
patch_size (`list(int)`, *optional*, defaults to `[7, 7]`):
The size (resolution) of each patch.
patch_stride (`list(int)`, *optional*, defaults to `[4, 4]`):
The stride of the patch.
patch_padding (`list(int)`, *optional*, defaults to `[3, 3]`):
The padding of the patch.
mlp_ratio (`float`, *optional*, defaults to 4.0):
The ratio of mlp hidden dim to embedding dim.
depths (`list(int)`, *optional*, defaults to `[2, 3, 16, 3]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[1, 2, 4, 8]`):
Number of attention heads in each layer of the Transformer encoder.
embed_dim_multiplier (`float`, *optional*, defaults to 2.0):
The multiplier to the dimensionality of patch embedding in each layer of the Transformer encoder.
num_query_pool (`int`, *optional*, defaults to 3):
The number of query pool stages.
query_stride (`list(int)`, *optional*, defaults to `[2, 2]`):
The stride of the query pool.
masked_unit_size (`list(int)`, *optional*, defaults to `[8, 8]`):
The size of the masked unit.
masked_unit_attention (`list(bool)`, *optional*, defaults to `[True, True, False, False]`):
Whether to use masked unit attention in each layer of the Transformer encoder.
drop_path_rate (`float`, *optional*, defaults to 0.0):
The drop path rate.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices and
the zero_initializer for initializing all bias vectors.
layer_norm_init (`float`, *optional*, defaults to 1.0):
The initial weight value for layer normalization layers.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
decoder_hidden_size (`int`, *optional*):
Dimensionality of decoder embeddings for MAE pretraining.
decoder_depth (`int`, *optional*):
Depth of the decoder for MAE pretraining.
decoder_num_heads (`int`, *optional*):
Number of attention heads in each layer of the decoder for MAE pretraining.
normalize_pixel_loss (`bool`, *optional*, defaults to `True`):
Whether to normalize the pixel loss by the number of pixels.
mask_ratio (`float`, *optional*, defaults to 0.6):
The ratio of masked tokens in the input.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import HieraConfig, HieraModel
>>> # Initializing a Hiera hiera-base-patch16-224 style configuration
>>> configuration = HieraConfig()
>>> # Initializing a model (with random weights) from the hiera-base-patch16-224 style configuration
>>> model = HieraModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, embed_dim=96, image_size=[224, 224], patch_size=[7, 7], patch_stride=[4, 4], patch_padding=[3, 3], mlp_ratio=4.0, depths=[2, 3, 16, 3], num_heads=[1, 2, 4, 8], embed_dim_multiplier=2.0, num_query_pool=3, query_stride=[2, 2], masked_unit_size=[8, 8], masked_unit_attention=[True, True, False, False], drop_path_rate=0.0, num_channels=3, hidden_act='gelu', initializer_range=0.02, layer_norm_init=1.0, layer_norm_eps=1e-06, decoder_hidden_size=None, decoder_depth=None, decoder_num_heads=None, normalize_pixel_loss=True, mask_ratio=0.6, out_features=None, out_indices=None, **kwargs):
pass
| 2
| 1
| 74
| 2
| 70
| 2
| 3
| 1.11
| 2
| 4
| 0
| 0
| 1
| 29
| 1
| 6
| 167
| 13
| 73
| 61
| 42
| 81
| 37
| 32
| 35
| 3
| 1
| 1
| 3
|
2,961
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraBackbone
|
from torch import nn
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, ModelOutput
from typing import Optional, Union
from ...utils import auto_docstring, logging, torch_int
import torch
from ...utils.backbone_utils import BackboneMixin
from .configuration_hiera import HieraConfig
@auto_docstring(custom_intro='\n Hiera backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class HieraBackbone(HieraPreTrainedModel, BackboneMixin):
def __init__(self, config: HieraConfig):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.embed_dim] + [int(config.embed_dim * config.embed_dim_multiplier ** i) for i in range(len(config.depths))]
self.embeddings = HieraEmbeddings(config, is_mae=False)
self.encoder = HieraEncoder(config)
hidden_states_norms = {}
for stage, num_channels in zip(self._out_features, self.channels):
hidden_states_norms[stage] = nn.LayerNorm(num_channels)
self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
"""
Returns:
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("facebook/hiera-tiny-224-hf")
>>> model = AutoBackbone.from_pretrained(
... "facebook/hiera-tiny-224-hf", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 7, 7]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
embedding_output, _, _ = self.embeddings(pixel_values)
outputs = self.encoder(embedding_output, head_mask=None, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict)
hidden_states = outputs[-1]
feature_maps = ()
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
batch_size, height, width, num_channels = hidden_state.shape
hidden_state = hidden_state.view(batch_size, height * width, num_channels)
hidden_state = self.hidden_states_norms[stage](hidden_state)
hidden_state = hidden_state.view(batch_size, height, width, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_maps += (hidden_state,)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs[1],)
if output_attentions:
output += (outputs[2],)
return output
return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs[1] if output_hidden_states else None, attentions=outputs[2] if output_attentions else None)
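A hedged, self-contained sketch (dummy tensors, not the real backbone weights) of the per-stage post-processing above: tokens are layer-normalized channels-last and then permuted to the `(batch, channels, height, width)` layout expected by detection frameworks:
```python
import torch
from torch import nn

# Illustrative shapes only: one stage output of a Hiera-base-like backbone.
batch_size, height, width, num_channels = 1, 7, 7, 768
hidden_state = torch.randn(batch_size, height, width, num_channels)
norm = nn.LayerNorm(num_channels)

tokens = norm(hidden_state.view(batch_size, height * width, num_channels))
feature_map = tokens.view(batch_size, height, width, num_channels).permute(0, 3, 1, 2).contiguous()
print(list(feature_map.shape))  # [1, 768, 7, 7]
```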
|
@auto_docstring(custom_intro='\n Hiera backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class HieraBackbone(HieraPreTrainedModel, BackboneMixin):
def __init__(self, config: HieraConfig):
pass
def get_input_embeddings(self):
pass
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
'''
Returns:
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("facebook/hiera-tiny-224-hf")
>>> model = AutoBackbone.from_pretrained(
... "facebook/hiera-tiny-224-hf", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 7, 7]
```'''
pass
| 5
| 1
| 31
| 5
| 19
| 7
| 5
| 0.38
| 2
| 10
| 4
| 0
| 3
| 4
| 3
| 16
| 96
| 16
| 58
| 23
| 48
| 22
| 38
| 17
| 34
| 11
| 2
| 2
| 14
|
2,962
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraDecoder
|
from .configuration_hiera import HieraConfig
from torch import nn
from typing import Optional, Union
import math
import torch
class HieraDecoder(nn.Module):
def __init__(self, config: HieraConfig):
super().__init__()
num_features = int(config.embed_dim * config.embed_dim_multiplier ** (len(config.depths) - 1))
tokens_spatial_shape = [i // s for i, s in zip(config.image_size, config.patch_stride)]
self.tokens_spatial_shape_final = [i // s ** config.num_query_pool for i, s in zip(tokens_spatial_shape, config.query_stride)]
self.mask_unit_spatial_shape_final = [i // s ** config.num_query_pool for i, s in zip(config.masked_unit_size, config.query_stride)]
self.decoder_embeddings = nn.Linear(num_features, config.decoder_hidden_size)
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size))
self.decoder_position_embeddings = nn.Parameter(torch.zeros(1, math.prod(self.tokens_spatial_shape_final), config.decoder_hidden_size))
self.decoder_block = HieraStage(config=config, hidden_size=config.decoder_hidden_size, hidden_size_output=config.decoder_hidden_size, num_heads=config.decoder_num_heads, depth=config.decoder_depth, use_mask_unit_attn=False, drop_path=[0.0] * config.decoder_depth, query_stride=[1] * config.decoder_depth, window_size=0)
self.decoder_norm = nn.LayerNorm(config.decoder_hidden_size, eps=config.layer_norm_eps)
self.pred_stride = config.patch_stride[-1] * config.query_stride[-1] ** config.num_query_pool
pred_dim = self.pred_stride ** len(config.query_stride) * config.num_channels
self.decoder_pred = nn.Linear(config.decoder_hidden_size, pred_dim)
def forward(self, encoder_hidden_states: torch.Tensor, bool_masked_pos: torch.BoolTensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, torch.BoolTensor]:
hidden_states = self.decoder_embeddings(encoder_hidden_states)
mask_unit_height, mask_unit_width, decoder_hidden_size = hidden_states.shape[2:]
batch_size, num_mask_units = bool_masked_pos.shape
decoder_hidden_states = torch.zeros(batch_size, num_mask_units, mask_unit_height, mask_unit_width, decoder_hidden_size, device=hidden_states.device, dtype=hidden_states.dtype)
mask_tokens = self.mask_token.view(1, 1, 1, 1, -1)
bool_masked_pos = bool_masked_pos.reshape(batch_size, num_mask_units, 1, 1, 1)
bool_masked_pos = bool_masked_pos.expand(-1, -1, mask_unit_height, mask_unit_width, decoder_hidden_size)
decoder_hidden_states[bool_masked_pos] = hidden_states.flatten()
decoder_hidden_states = (1 - bool_masked_pos.float()) * mask_tokens + bool_masked_pos.float() * decoder_hidden_states
hidden_states = undo_windowing(decoder_hidden_states, self.tokens_spatial_shape_final, self.mask_unit_spatial_shape_final)
bool_masked_pos = undo_windowing(bool_masked_pos[..., 0:1], self.tokens_spatial_shape_final, self.mask_unit_spatial_shape_final)
hidden_states = hidden_states.reshape(hidden_states.shape[0], -1, hidden_states.shape[-1])
bool_masked_pos = bool_masked_pos.view(hidden_states.shape[0], -1)
hidden_states = hidden_states + self.decoder_position_embeddings
hidden_states, attn_weights = self.decoder_block(hidden_states, head_mask=head_mask, output_attentions=output_attentions)
hidden_states = self.decoder_norm(hidden_states)
hidden_states = self.decoder_pred(hidden_states)
return (hidden_states, bool_masked_pos)
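For orientation, a small arithmetic sketch (using the documented default configuration values, so purely illustrative) of the decoder's prediction-head sizes computed in `__init__`:
```python
# Illustrative only: pred_stride and pred_dim with the default config values.
patch_stride = [4, 4]
query_stride = [2, 2]
num_query_pool = 3
num_channels = 3

pred_stride = patch_stride[-1] * query_stride[-1] ** num_query_pool  # 4 * 2**3 = 32
pred_dim = pred_stride ** len(query_stride) * num_channels           # 32**2 * 3 = 3072
print(pred_stride, pred_dim)  # 32 3072
```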
|
class HieraDecoder(nn.Module):
def __init__(self, config: HieraConfig):
pass
def forward(self, encoder_hidden_states: torch.Tensor, bool_masked_pos: torch.BoolTensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, torch.BoolTensor]:
pass
| 3
| 0
| 51
| 8
| 38
| 5
| 1
| 0.13
| 1
| 7
| 2
| 0
| 2
| 9
| 2
| 12
| 103
| 17
| 76
| 27
| 67
| 10
| 34
| 21
| 31
| 1
| 1
| 0
| 2
|
2,963
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraDropPath
|
from typing import Optional, Union
import torch
from torch import nn
class HieraDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
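The `drop_path` helper called above is defined elsewhere in the same file; the sketch below is a reconstruction of the standard stochastic-depth formulation it follows (an assumption based on the common implementation, not copied from this record):
```python
import torch

def drop_path(hidden_states: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # During training, zero out entire samples with probability drop_prob and
    # rescale the survivors so the expected activation is unchanged.
    if drop_prob == 0.0 or not training:
        return hidden_states
    keep_prob = 1 - drop_prob
    shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
    random_tensor.floor_()  # binarize to 0/1 per sample
    return hidden_states.div(keep_prob) * random_tensor
```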
|
class HieraDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| 4
| 1
| 2
| 0
| 2
| 0
| 1
| 0.13
| 1
| 4
| 0
| 0
| 3
| 1
| 3
| 13
| 12
| 3
| 8
| 5
| 4
| 1
| 8
| 5
| 4
| 1
| 1
| 0
| 3
|
2,964
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraEmbeddings
|
import torch
from .configuration_hiera import HieraConfig
import math
from typing import Optional, Union
from torch import nn
from ...utils import auto_docstring, logging, torch_int
class HieraEmbeddings(nn.Module):
"""
Construct position and patch embeddings.
"""
def __init__(self, config: HieraConfig, is_mae: bool=False) -> None:
super().__init__()
self.patch_stride = config.patch_stride
tokens_spatial_shape = [i // s for i, s in zip(config.image_size, config.patch_stride)]
self.mask_spatial_shape = [i // s for i, s in zip(tokens_spatial_shape, config.masked_unit_size)]
self.num_tokens = math.prod(tokens_spatial_shape)
self.is_mae = is_mae
self.patch_embeddings = HieraPatchEmbeddings(config, is_mae=is_mae)
self.position_embeddings = nn.Parameter(torch.zeros(1, self.num_tokens, config.embed_dim))
def interpolate_pos_encoding(self, embeddings: torch.Tensor, pos_embeds: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method interpolates the pre-trained position encodings so the model can be used on higher-resolution
images. It is also adapted to support torch.jit tracing, no class embeddings, and different patch strides.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1]
num_positions = pos_embeds.shape[1]
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return pos_embeds
dim = embeddings.shape[-1]
new_height = height // self.patch_stride[0]
new_width = width // self.patch_stride[1]
sqrt_num_positions = torch_int(num_positions ** 0.5)
pos_embeds = pos_embeds.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
pos_embeds = pos_embeds.permute(0, 3, 1, 2)
pos_embeds = nn.functional.interpolate(pos_embeds, size=(new_height, new_width), mode='bicubic', align_corners=False)
pos_embeds = pos_embeds.permute(0, 2, 3, 1).view(1, -1, dim)
return pos_embeds
def get_position_embedding(self, embeddings: torch.Tensor, height: int, width: int, interpolate_pos_encoding: bool) -> torch.FloatTensor:
return self.interpolate_pos_encoding(embeddings, self.position_embeddings, height, width) if interpolate_pos_encoding else self.position_embeddings
def forward(self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.Tensor, Optional[torch.BoolTensor], Optional[torch.LongTensor]]:
height, width = pixel_values.shape[-2:]
embeddings, bool_masked_pos, ids_restore = self.patch_embeddings(pixel_values, noise=noise)
embeddings = embeddings + self.get_position_embedding(embeddings, height, width, interpolate_pos_encoding)
return (embeddings, bool_masked_pos, ids_restore)
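A hedged, self-contained sketch of the interpolation path above (dummy zero embeddings, image resized from 224 to 256 with the default patch stride of 4), showing how the 56x56 position grid becomes a 64x64 grid:
```python
import torch
from torch import nn

embed_dim, patch_stride = 96, (4, 4)
pos_embeds = torch.zeros(1, 56 * 56, embed_dim)          # grid learned at 224x224 resolution

new_height, new_width = 256 // patch_stride[0], 256 // patch_stride[1]
grid = pos_embeds.reshape(1, 56, 56, embed_dim).permute(0, 3, 1, 2)
grid = nn.functional.interpolate(grid, size=(new_height, new_width), mode="bicubic", align_corners=False)
print(grid.permute(0, 2, 3, 1).reshape(1, -1, embed_dim).shape)  # torch.Size([1, 4096, 96])
```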
|
class HieraEmbeddings(nn.Module):
'''
Construct position and patch embeddings.
'''
def __init__(self, config: HieraConfig, is_mae: bool=False) -> None:
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, pos_embeds: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method interpolates the pre-trained position encodings so the model can be used on higher-resolution
images. It is also adapted to support torch.jit tracing, no class embeddings, and different patch strides.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def get_position_embedding(self, embeddings: torch.Tensor, height: int, width: int, interpolate_pos_encoding: bool) -> torch.FloatTensor:
pass
def forward(self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.Tensor, Optional[torch.BoolTensor], Optional[torch.LongTensor]]:
pass
| 5
| 2
| 17
| 3
| 12
| 2
| 2
| 0.22
| 1
| 7
| 2
| 0
| 4
| 6
| 4
| 14
| 74
| 14
| 49
| 29
| 35
| 11
| 31
| 20
| 26
| 2
| 1
| 1
| 6
|
2,965
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraEncoder
|
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, ModelOutput
from .configuration_hiera import HieraConfig
from torch import nn
import math
import torch
from typing import Optional, Union
class HieraEncoder(nn.Module):
def __init__(self, config: HieraConfig) -> None:
super().__init__()
total_depth = sum(config.depths)
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, total_depth, device='cpu')]
cumulative_depths = torch.tensor(config.depths, device='cpu').cumsum(0).tolist()
query_pool_layer = cumulative_depths[:config.num_query_pool]
query_strides = [math.prod(config.query_stride) if i in query_pool_layer else 1 for i in range(total_depth)]
self.stages = nn.ModuleList()
hidden_size = config.embed_dim
stage_ends = [0] + cumulative_depths
masked_unit_area = math.prod(config.masked_unit_size)
query_stride_area = math.prod(config.query_stride)
for idx_stage, depth in enumerate(config.depths):
hidden_size_output = int(config.embed_dim * config.embed_dim_multiplier ** idx_stage)
stage = HieraStage(config=config, depth=depth, hidden_size=hidden_size, hidden_size_output=hidden_size_output, num_heads=config.num_heads[idx_stage], drop_path=dpr[stage_ends[idx_stage]:stage_ends[idx_stage + 1]], query_stride=query_strides[stage_ends[idx_stage]:stage_ends[idx_stage + 1]], window_size=int(masked_unit_area * query_stride_area ** (-idx_stage)), use_mask_unit_attn=config.masked_unit_attention[idx_stage], stage_num=idx_stage)
hidden_size = hidden_size_output
self.stages.append(stage)
stage_size = [i // s for i, s in zip(config.image_size, config.patch_stride)]
unroll_schedule = [config.query_stride] * len(config.depths[:-1])
self.schedule = {}
for idx_stage in range(len(config.depths)):
self.schedule[idx_stage] = (unroll_schedule, stage_size)
if idx_stage < config.num_query_pool:
stage_size = [i // s for i, s in zip(stage_size, config.query_stride)]
unroll_schedule = unroll_schedule[1:]
self.gradient_checkpointing = False
def reroll(self, hidden_states: torch.Tensor, stage_idx: int, bool_masked_pos: Optional[torch.BoolTensor]=None) -> torch.Tensor:
"""
Roll the given tensor back up to spatial order assuming it's from the given block.
If no bool_masked_pos is provided returns:
- [batch_size, height, width, hidden_size]
If a bool_masked_pos is provided returns:
- [batch_size, num_mask_units, mask_unit_height, mask_unit_width, hidden_size]
"""
schedule, size = self.schedule[stage_idx]
batch_size, seq_len, hidden_size = hidden_states.shape
num_dim = len(size)
mask_unit_shape = [1] * num_dim
for strides in schedule:
hidden_states = hidden_states.view(batch_size, *strides, seq_len // math.prod(strides), *mask_unit_shape, hidden_size)
hidden_states = hidden_states.permute(0, 3, 1, 4, 2, 5, 6)
for i in range(num_dim):
mask_unit_shape[i] *= strides[i]
hidden_states = hidden_states.reshape(batch_size, -1, *mask_unit_shape, hidden_size)
seq_len = hidden_states.shape[1]
hidden_states = hidden_states.view(batch_size, seq_len, *mask_unit_shape, hidden_size)
if bool_masked_pos is not None:
return hidden_states
hidden_states = undo_windowing(hidden_states, size, mask_unit_shape)
return hidden_states
def forward(self, hidden_states: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_reshaped_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
reshaped_hidden_states = self.reroll(hidden_states, stage_idx=0, bool_masked_pos=bool_masked_pos)
all_reshaped_hidden_states = all_reshaped_hidden_states + (reshaped_hidden_states,)
for i, stage_module in enumerate(self.stages):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = stage_module(hidden_states, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
reshaped_hidden_states = self.reroll(hidden_states, stage_idx=i, bool_masked_pos=bool_masked_pos)
all_reshaped_hidden_states = all_reshaped_hidden_states + (reshaped_hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions, all_reshaped_hidden_states] if v is not None))
return HieraEncoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states)
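To make the `__init__` schedule above concrete, a small sketch (default depths, an illustrative non-zero drop-path rate) of how the drop-path values and per-block query strides are laid out:
```python
import math
import torch

depths, num_query_pool, query_stride = [2, 3, 16, 3], 3, [2, 2]
drop_path_rate = 0.1  # illustrative; the documented default is 0.0
total_depth = sum(depths)

dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)]
cumulative_depths = torch.tensor(depths).cumsum(0).tolist()   # [2, 5, 21, 24]
query_pool_layer = cumulative_depths[:num_query_pool]         # blocks 2, 5 and 21 pool queries
query_strides = [math.prod(query_stride) if i in query_pool_layer else 1 for i in range(total_depth)]
print(len(dpr), query_strides.count(4))  # 24 3
```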
|
class HieraEncoder(nn.Module):
def __init__(self, config: HieraConfig) -> None:
pass
def reroll(self, hidden_states: torch.Tensor, stage_idx: int, bool_masked_pos: Optional[torch.BoolTensor]=None) -> torch.Tensor:
'''
Roll the given tensor back up to spatial order assuming it's from the given block.
If no bool_masked_pos is provided returns:
- [batch_size, height, width, hidden_size]
If a bool_masked_pos is provided returns:
- [batch_size, num_mask_units, mask_unit_height, mask_unit_width, hidden_size]
'''
pass
def forward(self, hidden_states: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]:
pass
| 4
| 1
| 48
| 7
| 34
| 7
| 7
| 0.21
| 1
| 12
| 4
| 0
| 3
| 3
| 3
| 13
| 147
| 24
| 102
| 44
| 88
| 21
| 67
| 34
| 63
| 11
| 1
| 2
| 20
|
2,966
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraEncoderOutput
|
from dataclasses import dataclass
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, ModelOutput
from ...utils import auto_docstring, logging, torch_int
import torch
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro="\n Hiera encoder's outputs, with potential hidden states and attentions.\n ")
class HieraEncoderOutput(ModelOutput):
"""
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, height, width, hidden_size)`. These are the reshaped and re-rolled hidden states of the model.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Hiera encoder's outputs, with potential hidden states and attentions.\n ")
class HieraEncoderOutput(ModelOutput):
'''
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, height, width, hidden_size)`. These are the reshaped and re-rolled hidden states of the model.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 5
| 5
| 5
| 4
| 20
| 5
| 5
| 4
| 0
| 1
| 0
| 0
|
2,967
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraForImageClassification
|
import torch
from ...utils import auto_docstring, logging, torch_int
from torch import nn
from .configuration_hiera import HieraConfig
from typing import Optional, Union
@auto_docstring(custom_intro="\n Hiera Model transformer with an image classification head on top (a linear layer on top of the final hidden state with\n average pooling) e.g. for ImageNet.\n\n <Tip>\n\n Note that it's possible to fine-tune Hiera on higher resolution images than the ones it has been trained on, by\n setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained\n position embeddings to the higher resolution.\n\n </Tip>\n ")
class HieraForImageClassification(HieraPreTrainedModel):
def __init__(self, config: HieraConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.hiera = HieraModel(config, add_pooling_layer=True, is_mae=False)
self.classifier = nn.Linear(self.hiera.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, HieraForImageClassificationOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
outputs = self.hiera(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
pooled_output = outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return HieraForImageClassificationOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states)
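The loss branch above delegates to `self.loss_function`; as a rough, hedged illustration of the behaviour the docstring describes (cross-entropy for `num_labels > 1`, mean-squared error for `num_labels == 1`), independent of the model itself:
```python
import torch
from torch import nn

# Classification: num_labels > 1 -> cross-entropy over class logits.
logits_cls = torch.randn(4, 10)
labels_cls = torch.randint(0, 10, (4,))
print(nn.CrossEntropyLoss()(logits_cls, labels_cls).ndim)  # 0 (scalar loss)

# Regression: num_labels == 1 -> mean-squared error against float targets.
logits_reg = torch.randn(4, 1).squeeze(-1)
labels_reg = torch.randn(4)
print(nn.MSELoss()(logits_reg, labels_reg).ndim)  # 0 (scalar loss)
```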
|
@auto_docstring(custom_intro="\n Hiera Model transformer with an image classification head on top (a linear layer on top of the final hidden state with\n average pooling) e.g. for ImageNet.\n\n <Tip>\n\n Note that it's possible to fine-tune Hiera on higher resolution images than the ones it has been trained on, by\n setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained\n position embeddings to the higher resolution.\n\n </Tip>\n ")
class HieraForImageClassification(HieraPreTrainedModel):
def __init__(self, config: HieraConfig) -> None:
pass
@auto_docstring
def forward(self, pixel_values, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, HieraForImageClassificationOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5
| 1
| 42
| 5
| 33
| 5
| 8
| 0.12
| 1
| 8
| 3
| 0
| 2
| 3
| 2
| 3
| 93
| 11
| 73
| 22
| 54
| 9
| 35
| 12
| 32
| 14
| 2
| 3
| 16
|
2,968
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraForImageClassificationOutput
|
from dataclasses import dataclass
import torch
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, ModelOutput
from ...utils import auto_docstring, logging, torch_int
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Hiera image classification outputs.\n ')
class HieraForImageClassificationOutput(ImageClassifierOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, `optional`):
Loss value for the training task.
logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
Prediction scores of the classification head (logits of the output layer).
hidden_states (`tuple(torch.FloatTensor)`, `optional`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. These are the unrolled hidden states of the model.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, `optional`):
Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, `optional`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, height, width, hidden_size)`. These are the reshaped and re-rolled hidden states of the model.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Hiera image classification outputs.\n ')
class HieraForImageClassificationOutput(ImageClassifierOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, `optional`):
Loss value for the training task.
logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
Prediction scores of the classification head (logits of the output layer).
hidden_states (`tuple(torch.FloatTensor)`, `optional`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. These are the unrolled hidden states of the model.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, `optional`):
Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, `optional`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, height, width, hidden_size)`. These are the reshaped and re-rolled hidden states of the model.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.67
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 5
| 6
| 6
| 5
| 22
| 6
| 6
| 5
| 0
| 2
| 0
| 0
|
2,969
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraForPreTraining
|
from ...utils import auto_docstring, logging, torch_int
import torch
from typing import Optional, Union
from torch import nn
from .configuration_hiera import HieraConfig
@auto_docstring(custom_intro='\n The Hiera Model transformer with the decoder on top for self-supervised pre-training.\n\n <Tip>\n\n Note that we provide a script to pre-train this model on custom data in our [examples\n directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).\n\n </Tip>\n ')
class HieraForPreTraining(HieraPreTrainedModel):
def __init__(self, config: HieraConfig) -> None:
super().__init__(config)
self.hiera = HieraModel(config, add_pooling_layer=False, is_mae=True)
self.encoder_norm = nn.LayerNorm(self.hiera.num_features, eps=config.layer_norm_eps)
self.multiscale_fusion = HieraMultiScaleHead(config)
self.decoder = HieraDecoder(config)
self.pred_stride = self.decoder.pred_stride
self.post_init()
def get_pixel_label_2d(self, pixel_values: torch.Tensor, bool_masked_pos: torch.BoolTensor) -> torch.Tensor:
pixel_values = pixel_values.permute(0, 2, 3, 1)
size = self.pred_stride
label = pixel_values.unfold(1, size, size).unfold(2, size, size)
label = label.flatten(1, 2).flatten(2)
label = label[bool_masked_pos]
if self.config.normalize_pixel_loss:
mean = label.mean(dim=-1, keepdim=True)
var = label.var(dim=-1, keepdim=True)
label = (label - mean) / (var + 1e-06) ** 0.5
return label
def forward_loss(self, pixel_values: torch.Tensor, logits: torch.Tensor, bool_masked_pos: torch.BoolTensor):
bool_masked_pos = ~bool_masked_pos
label = self.get_pixel_label_2d(pixel_values, bool_masked_pos)
logits = logits[bool_masked_pos]
loss = (logits - label) ** 2
loss = loss.mean()
return loss
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, noise: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, HieraForPreTrainingOutput]:
"""
noise (`torch.FloatTensor` of shape `(batch_size, num_mask_units)`, *optional*):
Mainly used for testing purposes to control randomness and maintain reproducibility.
Examples:
```python
>>> from transformers import AutoImageProcessor, HieraForPreTraining
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/hiera-tiny-224-mae-hf")
>>> model = HieraForPreTraining.from_pretrained("facebook/hiera-tiny-224-mae-hf")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> loss = outputs.loss
>>> print(list(logits.shape))
[1, 196, 768]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
outputs = self.hiera(pixel_values, noise=noise, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
feature_maps = outputs[-1]
bool_masked_pos = outputs[1]
ids_to_restore = outputs[2]
feature_maps = feature_maps[1:self.hiera.config.num_query_pool + 1] + (feature_maps[-1],)
fused_hidden_states = self.multiscale_fusion(feature_maps)
fused_hidden_states = self.encoder_norm(fused_hidden_states)
logits, bool_masked_pos = self.decoder(fused_hidden_states, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions)
loss = self.forward_loss(pixel_values, logits, bool_masked_pos)
if not return_dict:
output = (logits, bool_masked_pos, ids_to_restore)
if output_hidden_states:
output = output + (outputs[3],)
if output_attentions:
output = output + (outputs[4],)
if output_hidden_states:
output = output + (outputs[-1],)
return (loss,) + output if loss is not None else output
return HieraForPreTrainingOutput(loss=loss, logits=logits, bool_masked_pos=bool_masked_pos, ids_restore=ids_to_restore, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states if output_hidden_states else None)
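A small, self-contained sketch of the per-patch target normalization in `get_pixel_label_2d` (random dummy patches; 3072 = 32 * 32 * 3 values per masked patch with the default strides), applied when `config.normalize_pixel_loss` is enabled:
```python
import torch

label = torch.randn(5, 3072)                        # 5 masked patches worth of raw pixels
mean = label.mean(dim=-1, keepdim=True)
var = label.var(dim=-1, keepdim=True)
label = (label - mean) / (var + 1e-06) ** 0.5       # standardize each patch independently
print(bool(label.mean(dim=-1).abs().max() < 1e-4))  # True: roughly zero-mean per patch
```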
|
@auto_docstring(custom_intro='\n The Hiera Model transformer with the decoder on top for self-supervised pre-training.\n\n <Tip>\n\n Note that we provide a script to pre-train this model on custom data in our [examples\n directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).\n\n </Tip>\n ')
class HieraForPreTraining(HieraPreTrainedModel):
def __init__(self, config: HieraConfig) -> None:
pass
def get_pixel_label_2d(self, pixel_values: torch.Tensor, bool_masked_pos: torch.BoolTensor) -> torch.Tensor:
pass
def forward_loss(self, pixel_values: torch.Tensor, logits: torch.Tensor, bool_masked_pos: torch.BoolTensor):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, noise: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, HieraForPreTrainingOutput]:
'''
noise (`torch.FloatTensor` of shape `(batch_size, num_mask_units)`, *optional*):
Mainly used for testing purposes to control randomness and maintain reproducibility.
Examples:
```python
>>> from transformers import AutoImageProcessor, HieraForPreTraining
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/hiera-tiny-224-mae-hf")
>>> model = HieraForPreTraining.from_pretrained("facebook/hiera-tiny-224-mae-hf")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> loss = outputs.loss
>>> print(list(logits.shape))
[1, 196, 768]
```'''
pass
| 7
| 1
| 32
| 4
| 20
| 8
| 4
| 0.36
| 1
| 9
| 5
| 0
| 4
| 5
| 4
| 5
| 134
| 20
| 84
| 34
| 68
| 30
| 50
| 24
| 45
| 11
| 2
| 2
| 15
|
2,970
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraForPreTrainingOutput
|
from ...utils import auto_docstring, logging, torch_int
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, ModelOutput
from dataclasses import dataclass
from typing import Optional, Union
import torch
@dataclass
@auto_docstring(custom_intro="\n Class for HieraForPreTraining's outputs, with potential hidden states and attentions.\n ")
class HieraForPreTrainingOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`):
Pixel reconstruction loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, patch_size ** 2 * num_channels)`):
Pixel reconstruction logits.
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Tensor indicating which patches are masked (0) and which are not (1).
ids_restore (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Tensor containing the original index of the (shuffled) masked patches.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, height, width, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs reshaped to include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
bool_masked_pos: Optional[torch.BoolTensor] = None
ids_restore: Optional[torch.LongTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Class for HieraForPreTraining's outputs, with potential hidden states and attentions.\n ")
class HieraForPreTrainingOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`):
Pixel reconstruction loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, patch_size ** 2 * num_channels)`):
Pixel reconstruction logits.
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Tensor indicating which patches are masked (0) and which are not (1).
ids_restore (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Tensor containing the original index of the (shuffled) masked patches.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, height, width, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs reshaped to include the spatial dimensions.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 2
| 8
| 8
| 7
| 24
| 8
| 8
| 7
| 0
| 1
| 0
| 0
|
2,971
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraLayer
|
from torch import nn
import torch
from typing import Optional, Union
class HieraLayer(nn.Module):
def __init__(self, config, hidden_size: int, hidden_size_output: int, num_heads: int, drop_path: float=0.0, query_stride: int=1, window_size: int=0, use_mask_unit_attn: bool=False) -> None:
super().__init__()
self.hidden_size = hidden_size
self.hidden_size_output = hidden_size_output
self.query_stride = query_stride
self.layernorm_before = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
self.attn = HieraMaskUnitAttention(hidden_size=hidden_size, hidden_size_output=hidden_size_output, num_heads=num_heads, query_stride=query_stride, window_size=window_size, use_mask_unit_attn=use_mask_unit_attn)
self.layernorm_after = nn.LayerNorm(hidden_size_output, eps=config.layer_norm_eps)
self.mlp = HieraMlp(config, hidden_size_output)
self.drop_path = HieraDropPath(drop_path) if drop_path > 0 else nn.Identity()
if hidden_size != hidden_size_output:
self.proj = nn.Linear(hidden_size, hidden_size_output)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
batch_size, seq_len, _ = hidden_states.shape
hidden_states_norm = self.layernorm_before(hidden_states)
if self.hidden_size != self.hidden_size_output:
hidden_states = self.proj(hidden_states_norm)
hidden_states = hidden_states.view(batch_size, self.query_stride, -1, self.hidden_size_output).max(dim=1).values
hidden_states_norm, attn_weights = self.attn(hidden_states_norm, head_mask, output_attentions=output_attentions)
hidden_states = hidden_states + self.drop_path(hidden_states_norm)
residual = hidden_states
hidden_states = self.layernorm_after(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.drop_path(hidden_states)
return (hidden_states, attn_weights)
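When a block both changes the hidden size and pools queries, the shortcut branch above reshapes the projected sequence to `(batch, query_stride, -1, hidden_size_output)` and takes an element-wise max over the `query_stride` axis so its length matches the pooled attention output; a toy sketch:
```python
import torch

batch_size, seq_len, hidden_size_output, query_stride = 2, 8, 4, 2
hidden_states = torch.randn(batch_size, seq_len, hidden_size_output)

pooled = hidden_states.view(batch_size, query_stride, -1, hidden_size_output).max(dim=1).values
print(pooled.shape)  # torch.Size([2, 4, 4]) -- sequence length halved by the max pooling
```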
|
class HieraLayer(nn.Module):
def __init__(self, config, hidden_size: int, hidden_size_output: int, num_heads: int, drop_path: float=0.0, query_stride: int=1, window_size: int=0, use_mask_unit_attn: bool=False) -> None:
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
pass
| 3
| 0
| 30
| 4
| 26
| 1
| 3
| 0.04
| 1
| 8
| 3
| 0
| 2
| 9
| 2
| 12
| 62
| 8
| 52
| 31
| 34
| 2
| 26
| 16
| 23
| 3
| 1
| 1
| 5
|
2,972
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraMaskUnitAttention
|
from typing import Optional, Union
import torch
from torch import nn
class HieraMaskUnitAttention(nn.Module):
"""
Computes either Mask Unit or Global Attention. Also is able to perform query pooling.
Note: this assumes the tokens have already been flattened and unrolled into mask units.
"""
def __init__(self, hidden_size: int, hidden_size_output: int, num_heads: int, query_stride: int=1, window_size: int=0, use_mask_unit_attn: bool=False) -> None:
super().__init__()
self.num_heads = num_heads
self.query_stride = query_stride
self.hidden_size_output = hidden_size_output
self.head_dim = hidden_size_output // num_heads
self.scale = self.head_dim ** (-0.5)
self.qkv = nn.Linear(hidden_size, 3 * hidden_size_output)
self.proj = nn.Linear(hidden_size_output, hidden_size_output)
self.window_size = window_size
self.use_mask_unit_attn = use_mask_unit_attn
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input should be of shape [batch, tokens, channels]."""
batch_size, seq_len, _ = hidden_states.shape
num_windows = 1
if self.use_mask_unit_attn:
num_windows = seq_len // (self.query_stride * self.window_size)
qkv = self.qkv(hidden_states)
qkv = qkv.reshape(batch_size, -1, num_windows, 3, self.num_heads, self.head_dim)
qkv = qkv.permute(3, 0, 4, 2, 1, 5)
query, key, value = qkv.unbind(0)
if self.query_stride > 1:
query = query.view(batch_size, self.num_heads, num_windows, self.query_stride, -1, self.head_dim)
query = query.max(dim=3).values
attn_weights = query * self.scale @ key.transpose(-1, -2)
attn_weights = attn_weights.softmax(dim=-1)
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = attn_weights @ value
attn_output = attn_output.transpose(1, 3).reshape(batch_size, -1, self.hidden_size_output)
attn_output = self.proj(attn_output)
return (attn_output, attn_weights) if output_attentions else (attn_output, None)
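A hedged shape walk-through of the forward pass above with toy sizes, showing how the sequence splits into `num_windows = seq_len // (query_stride * window_size)` local windows and how the query is max-pooled before the attention product:
```python
import torch

batch_size, seq_len, hidden, num_heads = 2, 64, 8, 2
query_stride, window_size = 2, 4
head_dim = hidden // num_heads
num_windows = seq_len // (query_stride * window_size)    # 8

qkv = torch.randn(batch_size, seq_len, 3 * hidden)
qkv = qkv.reshape(batch_size, -1, num_windows, 3, num_heads, head_dim).permute(3, 0, 4, 2, 1, 5)
query, key, value = qkv.unbind(0)                        # each (2, 2, 8, 8, 4)
query = query.view(batch_size, num_heads, num_windows, query_stride, -1, head_dim).max(dim=3).values
attn_weights = (query * head_dim ** -0.5) @ key.transpose(-1, -2)
print(attn_weights.shape)  # torch.Size([2, 2, 8, 4, 8])
```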
|
class HieraMaskUnitAttention(nn.Module):
'''
Computes either Mask Unit or Global Attention. Also is able to perform query pooling.
Note: this assumes the tokens have already been flattened and unrolled into mask units.
'''
def __init__(self, hidden_size: int, hidden_size_output: int, num_heads: int, query_stride: int=1, window_size: int=0, use_mask_unit_attn: bool=False) -> None:
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.FloatTensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''Input should be of shape [batch, tokens, channels].'''
pass
| 3
| 2
| 29
| 6
| 22
| 2
| 3
| 0.16
| 1
| 4
| 0
| 0
| 2
| 9
| 2
| 12
| 66
| 14
| 45
| 31
| 29
| 7
| 32
| 18
| 29
| 5
| 1
| 1
| 6
|
2,973
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraMlp
|
import torch
from torch import nn
from ...activations import ACT2FN
class HieraMlp(nn.Module):
def __init__(self, config, dim: int) -> None:
super().__init__()
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(dim, int(dim * config.mlp_ratio))
self.fc2 = nn.Linear(int(dim * config.mlp_ratio), dim)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
class HieraMlp(nn.Module):
def __init__(self, config, dim: int) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 3
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
2,974
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraModel
|
from .configuration_hiera import HieraConfig
import math
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, ModelOutput
from ...utils import auto_docstring, logging, torch_int
import torch
from typing import Optional, Union
@auto_docstring
class HieraModel(HieraPreTrainedModel):
def __init__(self, config: HieraConfig, add_pooling_layer: bool=True, is_mae: bool=False):
"""
add_pooling_layer (`bool`, *optional*, defaults to `True`):
Whether or not to apply a pooling layer.
is_mae (`bool`, *optional*, defaults to `False`):
Whether or not to run the model in MAE mode.
"""
super().__init__(config)
self.num_features = int(config.embed_dim * config.embed_dim_multiplier ** (len(config.depths) - 1))
self.embeddings = HieraEmbeddings(config, is_mae=is_mae)
self.encoder = HieraEncoder(config)
self.unroll_schedule = [config.query_stride] * len(config.depths[:-1])
self.pooler = HieraPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self) -> HieraPatchEmbeddings:
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
class `PreTrainedModel`.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, noise: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
"""
noise (`torch.FloatTensor` of shape `(batch_size, num_mask_units)`, *optional*):
Mainly used for testing purposes to control randomness and maintain reproducibility.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
head_mask = self.get_head_mask(head_mask, len(self.config.depths))
embedding_output, bool_masked_pos, ids_restore = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, noise=noise)
image_shape = (pixel_values.shape[-2], pixel_values.shape[-1])
hidden_states = unroll(embedding_output, image_shape=image_shape, patch_stride=self.config.patch_stride, schedule=self.unroll_schedule)
if bool_masked_pos is not None:
mask_unit_area = math.prod(self.config.masked_unit_size)
batch_size, _, hidden_size = hidden_states.shape
positions = bool_masked_pos.unsqueeze(-1).tile(1, mask_unit_area, hidden_size)
hidden_states = hidden_states[positions]
hidden_states = hidden_states.view(batch_size, -1, hidden_size)
encoder_outputs = self.encoder(hidden_states, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output)
if not return_dict:
head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
head_outputs = head_outputs + (bool_masked_pos, ids_restore) if bool_masked_pos is not None else head_outputs
return head_outputs + encoder_outputs[1:]
return HieraModelOutput(last_hidden_state=sequence_output, pooler_output=pooled_output, bool_masked_pos=bool_masked_pos, ids_restore=ids_restore, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states)
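A minimal forward-pass sketch for the class above; the default `HieraConfig`, the random input, and the printed shapes are illustrative assumptions, not taken from this file.
import torch
from transformers import HieraConfig, HieraModel

config = HieraConfig()
model = HieraModel(config)
model.eval()

# One random image matching the config's expected input size (assumed defaults).
pixel_values = torch.randn(1, config.num_channels, *config.image_size)
with torch.no_grad():
    outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, num_features)
print(outputs.pooler_output.shape)      # (batch_size, num_features)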
|
@auto_docstring
class HieraModel(HieraPreTrainedModel):
def __init__(self, config: HieraConfig, add_pooling_layer: bool=True, is_mae: bool=False):
'''
add_pooling_layer (`bool`, *optional*, defaults to `True`):
Whether or not to apply pooling layer.
is_mae (`bool`, *optional*, defaults to `False`):
Whether or not to run the model in MAE mode.
'''
pass
def get_input_embeddings(self) -> HieraPatchEmbeddings:
pass
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, noise: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
'''
noise (`torch.FloatTensor` of shape `(batch_size, num_mask_units)`, *optional*):
Mainly used for testing purposes to control randomness and maintain reproducibility.
'''
pass
| 7
| 3
| 26
| 3
| 19
| 4
| 4
| 0.19
| 1
| 12
| 7
| 0
| 4
| 5
| 4
| 5
| 114
| 15
| 83
| 31
| 61
| 16
| 40
| 21
| 35
| 10
| 2
| 1
| 15
|
2,975
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraModelOutput
|
from typing import Optional, Union
import torch
from ...utils import auto_docstring, logging, torch_int
from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, ModelOutput
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro="\n Hiera model's outputs that also contains a pooling of the last hidden states.\n ")
class HieraModelOutput(ModelOutput):
"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Tensor indicating which patches are masked (0) and which are not (1).
ids_restore (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Tensor containing the original index of the (shuffled) masked patches.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, height, width, hidden_size)`. These are the reshaped and re-rolled hidden states of the model.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
bool_masked_pos: Optional[torch.BoolTensor] = None
ids_restore: Optional[torch.LongTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Hiera model's outputs that also contains a pooling of the last hidden states.\n ")
class HieraModelOutput(ModelOutput):
'''
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Tensor indicating which patches are masked (0) and which are not (1).
ids_restore (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Tensor containing the original index of the (shuffled) masked patches.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, height, width, hidden_size)`. These are the reshaped and re-rolled hidden states of the model.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 5
| 8
| 8
| 7
| 26
| 8
| 8
| 7
| 0
| 1
| 0
| 0
|
2,976
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraMultiScaleHead
|
import torch
from torch import nn
from .configuration_hiera import HieraConfig
class HieraMultiScaleHead(nn.Module):
def __init__(self, config: HieraConfig):
super().__init__()
self.mask_unit_spatial_shape_final = [i // s ** config.num_query_pool for i, s in zip(config.masked_unit_size, config.query_stride)]
self.stage_dimensions = [int(config.embed_dim * config.embed_dim_multiplier ** i) for i in range(len(config.depths))]
current_masked_unit_size = config.masked_unit_size
self.multi_scale_fusion_heads = nn.ModuleList()
for idx in range(config.num_query_pool):
kernel = [i // s for i, s in zip(current_masked_unit_size, self.mask_unit_spatial_shape_final)]
current_masked_unit_size = [i // s for i, s in zip(current_masked_unit_size, config.query_stride)]
self.multi_scale_fusion_heads.append(nn.Conv2d(self.stage_dimensions[idx], self.stage_dimensions[-1], kernel_size=kernel, stride=kernel))
self.multi_scale_fusion_heads.append(nn.Identity())
def apply_fusion_head(self, head: nn.Module, hidden_states: torch.Tensor) -> torch.Tensor:
if isinstance(head, nn.Identity):
return hidden_states
batch_size, num_mask_units, mask_unit_height, mask_unit_width, hidden_size = hidden_states.shape
hidden_states = hidden_states.reshape(batch_size * num_mask_units, mask_unit_height, mask_unit_width, hidden_size)
hidden_states = hidden_states.permute(0, 3, 1, 2)
hidden_states = head(hidden_states)
hidden_states = hidden_states.permute(0, 2, 3, 1)
mask_unit_height_final, mask_unit_width_final, hidden_size = hidden_states.shape[1:]
hidden_states = hidden_states.reshape(batch_size, num_mask_units, mask_unit_height_final, mask_unit_width_final, hidden_size)
return hidden_states
def forward(self, feature_maps: list[torch.Tensor]) -> torch.Tensor:
hidden_states = 0.0
for head, feature_map in zip(self.multi_scale_fusion_heads, feature_maps):
hidden_states = hidden_states + self.apply_fusion_head(head, feature_map)
return hidden_states
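A standalone sketch of the reshape/permute pipeline in `apply_fusion_head` above, with assumed sizes (8x8 mask units pooled to 2x2 by a single conv head); every number here is illustrative.
import torch
from torch import nn

batch_size, num_mask_units = 2, 4
mask_unit_height = mask_unit_width = 8
hidden_size, hidden_size_final = 96, 768
kernel = (4, 4)  # pools each 8x8 mask unit down to a 2x2 final shape

head = nn.Conv2d(hidden_size, hidden_size_final, kernel_size=kernel, stride=kernel)
hidden_states = torch.randn(batch_size, num_mask_units, mask_unit_height, mask_unit_width, hidden_size)

# Fold mask units into the batch, run the conv channels-first, then unfold again.
x = hidden_states.reshape(batch_size * num_mask_units, mask_unit_height, mask_unit_width, hidden_size)
x = x.permute(0, 3, 1, 2)
x = head(x)
x = x.permute(0, 2, 3, 1)
x = x.reshape(batch_size, num_mask_units, *x.shape[1:])
print(x.shape)  # torch.Size([2, 4, 2, 2, 768])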
|
class HieraMultiScaleHead(nn.Module):
def __init__(self, config: HieraConfig):
pass
def apply_fusion_head(self, head: nn.Module, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def forward(self, feature_maps: list[torch.Tensor]) -> torch.Tensor:
pass
| 4
| 0
| 17
| 2
| 14
| 2
| 2
| 0.12
| 1
| 6
| 1
| 0
| 3
| 3
| 3
| 13
| 55
| 7
| 43
| 14
| 39
| 5
| 28
| 14
| 24
| 2
| 1
| 1
| 6
|
2,977
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraPatchEmbeddings
|
from torch import nn
from typing import Optional, Union
import torch
import math
class HieraPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config, is_mae: bool=False):
super().__init__()
self.spatial_dims = len(config.patch_size)
if self.spatial_dims != 2:
raise ValueError(f'The number of dimensions of the input image should be 2, but got {self.spatial_dims}.')
self.num_channels = config.num_channels
self.image_size = config.image_size[-2:]
self.tokens_spatial_shape = [i // s for i, s in zip(config.image_size, config.patch_stride)]
self.mask_spatial_shape = [i // s for i, s in zip(self.tokens_spatial_shape, config.masked_unit_size)]
self.mask_ratio = config.mask_ratio
self.is_mae = is_mae
self.projection = nn.Conv2d(self.num_channels, config.embed_dim, kernel_size=config.patch_size, stride=config.patch_stride, padding=config.patch_padding)
def masked_conv(self, pixel_values: torch.FloatTensor, bool_masked_pos: Optional[torch.BoolTensor]=None) -> torch.Tensor:
"""Zero-out the masked regions of the input before conv.
Prevents leakage of masked regions when using overlapping kernels.
"""
if bool_masked_pos is None:
return self.projection(pixel_values)
target_size = pixel_values.shape[2:]
bool_masked_pos = bool_masked_pos.view(pixel_values.shape[0], 1, *self.mask_spatial_shape)
bool_masked_pos = nn.functional.interpolate(bool_masked_pos.float(), size=target_size)
return self.projection(pixel_values * bool_masked_pos)
def random_masking(self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor]=None) -> tuple[torch.BoolTensor, torch.LongTensor]:
"""
Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random
noise.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`)
noise (`torch.FloatTensor` of shape `(batch_size, num_mask_units)`, *optional*):
Mainly used for testing purposes to control randomness and maintain reproducibility.
"""
batch_size = pixel_values.shape[0]
num_windows = math.prod(self.mask_spatial_shape)
len_keep = int(num_windows * (1 - self.mask_ratio))
if noise is None:
noise = torch.rand(batch_size, num_windows, device=pixel_values.device)
ids_shuffle = torch.argsort(noise, dim=1)
ids_restore = torch.argsort(ids_shuffle, dim=1).to(pixel_values.device)
bool_masked_pos = torch.zeros([batch_size, num_windows], device=pixel_values.device)
bool_masked_pos[:, :len_keep] = 1
bool_masked_pos = torch.gather(bool_masked_pos, dim=1, index=ids_restore).bool()
return (bool_masked_pos, ids_restore)
def forward(self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor]=None) -> tuple[torch.Tensor, Optional[torch.BoolTensor], Optional[torch.LongTensor]]:
bool_masked_pos, ids_restore = self.random_masking(pixel_values, noise=noise) if self.is_mae else (None, None)
embeddings = self.masked_conv(pixel_values, bool_masked_pos)
embeddings = embeddings.flatten(2).transpose(2, 1)
return (embeddings, bool_masked_pos, ids_restore)
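A self-contained illustration of the argsort-of-noise trick used by `random_masking` above; the batch size, window count, and mask ratio are made-up values.
import torch

batch_size, num_windows, mask_ratio = 2, 8, 0.5
len_keep = int(num_windows * (1 - mask_ratio))

noise = torch.rand(batch_size, num_windows)
ids_shuffle = torch.argsort(noise, dim=1)        # a random permutation per sample
ids_restore = torch.argsort(ids_shuffle, dim=1)  # its inverse permutation

bool_masked_pos = torch.zeros(batch_size, num_windows)
bool_masked_pos[:, :len_keep] = 1                # 1 = kept, 0 = masked
bool_masked_pos = torch.gather(bool_masked_pos, dim=1, index=ids_restore).bool()
print(bool_masked_pos.sum(dim=1))  # exactly len_keep kept windows per sample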
|
class HieraPatchEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config, is_mae: bool=False):
pass
def masked_conv(self, pixel_values: torch.FloatTensor, bool_masked_pos: Optional[torch.BoolTensor]=None) -> torch.Tensor:
'''Zero-out the masked regions of the input before conv.
Prevents leakage of masked regions when using overlapping kernels.
'''
pass
def random_masking(self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor]=None) -> tuple[torch.BoolTensor, torch.LongTensor]:
'''
Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random
noise.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`)
noise (`torch.FloatTensor` of shape `(batch_size, num_mask_units)`, *optional*):
Mainly used for testing purposes to control randomness and maintain reproducibility.
'''
pass
def forward(self, pixel_values: torch.FloatTensor, noise: Optional[torch.FloatTensor]=None) -> tuple[torch.Tensor, Optional[torch.BoolTensor], Optional[torch.LongTensor]]:
pass
| 5
| 3
| 21
| 3
| 13
| 5
| 2
| 0.45
| 1
| 6
| 0
| 0
| 4
| 8
| 4
| 14
| 92
| 15
| 53
| 30
| 40
| 24
| 37
| 22
| 32
| 2
| 1
| 1
| 8
|
2,978
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraPooler
|
from torch import nn
import torch
from .configuration_hiera import HieraConfig
class HieraPooler(nn.Module):
def __init__(self, config: HieraConfig):
super().__init__()
num_features = int(config.embed_dim * config.embed_dim_multiplier ** (len(config.depths) - 1))
self.layernorm = nn.LayerNorm(num_features, eps=config.layer_norm_eps)
self.pooler = nn.AdaptiveAvgPool1d(1)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = hidden_states.transpose(1, 2)
pooled_output = self.pooler(hidden_states)
pooled_output = torch.flatten(pooled_output, 1)
pooled_output = self.layernorm(pooled_output)
return pooled_output
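The pooling path above, replayed with dummy tensors and assumed sizes: average over the sequence dimension, then LayerNorm.
import torch
from torch import nn

batch_size, seq_len, num_features = 2, 49, 768
hidden_states = torch.randn(batch_size, seq_len, num_features)

pooler = nn.AdaptiveAvgPool1d(1)
layernorm = nn.LayerNorm(num_features)

pooled = pooler(hidden_states.transpose(1, 2))  # (batch, features, 1)
pooled = torch.flatten(pooled, 1)               # (batch, features)
pooled = layernorm(pooled)
print(pooled.shape)  # torch.Size([2, 768])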
|
class HieraPooler(nn.Module):
def __init__(self, config: HieraConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 4
| 1
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 7
| 9
| 0
| 12
| 7
| 9
| 1
| 1
| 0
| 2
|
2,979
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraPreTrainedModel
|
from torch import nn
from ...modeling_utils import PreTrainedModel
from .configuration_hiera import HieraConfig
from ...utils import auto_docstring, logging, torch_int
@auto_docstring
class HieraPreTrainedModel(PreTrainedModel):
config: HieraConfig
base_model_prefix = 'hiera'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
def _init_weights(self, module) -> None:
"""Initialize the weights"""
std = self.config.initializer_range
if isinstance(module, HieraEmbeddings):
nn.init.trunc_normal_(module.position_embeddings, std=std)
elif isinstance(module, HieraDecoder):
nn.init.trunc_normal_(module.mask_token, std=std)
nn.init.trunc_normal_(module.decoder_position_embeddings, std=std)
elif isinstance(module, (nn.Linear, nn.Conv1d, nn.Conv2d)):
nn.init.trunc_normal_(module.weight, std=std)
if module.bias is not None:
nn.init.constant_(module.bias, std)
elif isinstance(module, nn.LayerNorm):
nn.init.constant_(module.bias, std)
nn.init.constant_(module.weight, self.config.layer_norm_init)
|
@auto_docstring
class HieraPreTrainedModel(PreTrainedModel):
def _init_weights(self, module) -> None:
'''Initialize the weights'''
pass
| 3
| 1
| 19
| 4
| 14
| 1
| 6
| 0.26
| 1
| 2
| 2
| 4
| 1
| 0
| 1
| 1
| 30
| 6
| 19
| 7
| 17
| 5
| 16
| 7
| 14
| 6
| 1
| 2
| 6
|
2,980
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hiera/modeling_hiera.py
|
transformers.models.hiera.modeling_hiera.HieraStage
|
from ...modeling_layers import GradientCheckpointingLayer
import torch
from typing import Optional, Union
from torch import nn
class HieraStage(GradientCheckpointingLayer):
def __init__(self, config, depth: int, hidden_size: int, hidden_size_output: int, num_heads: int, drop_path: list[float], query_stride: list[int], window_size: int, use_mask_unit_attn: bool, stage_num: Optional[int]=None) -> None:
super().__init__()
previous_stage_used_masked_attention = False
if stage_num is not None:
previous_stage_used_masked_attention = config.masked_unit_attention[stage_num - 1 if stage_num > 0 else 0]
self.layers = nn.ModuleList([HieraLayer(config=config, hidden_size=hidden_size if i == 0 else hidden_size_output, hidden_size_output=hidden_size_output, num_heads=num_heads, drop_path=drop_path[i], query_stride=query_stride[i], window_size=window_size, use_mask_unit_attn=use_mask_unit_attn or (previous_stage_used_masked_attention and i == 0)) for i in range(depth)])
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.FloatTensor], output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
for i, layer_module in enumerate(self.layers):
layer_head_mask = head_mask[i] if head_mask is not None else None
hidden_states, attn_weights = layer_module(hidden_states, layer_head_mask, output_attentions=output_attentions)
return (hidden_states, attn_weights)
|
class HieraStage(GradientCheckpointingLayer):
def __init__(self, config, depth: int, hidden_size: int, hidden_size_output: int, num_heads: int, drop_path: list[float], query_stride: list[int], window_size: int, use_mask_unit_attn: bool, stage_num: Optional[int]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.FloatTensor], output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
pass
| 3
| 0
| 23
| 1
| 21
| 2
| 4
| 0.1
| 1
| 8
| 1
| 0
| 2
| 1
| 2
| 12
| 48
| 2
| 42
| 22
| 25
| 4
| 12
| 8
| 9
| 4
| 1
| 1
| 7
|
2,981
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/configuration_hubert.py
|
transformers.models.hubert.configuration_hubert.HubertConfig
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
class HubertConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`HubertModel`]. It is used to instantiate an
Hubert model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Hubert
[facebook/hubert-base-ls960](https://huggingface.co/facebook/hubert-base-ls960) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32):
Vocabulary size of the Hubert model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`HubertModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for activations inside the fully connected layer.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
final_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the final projection layer of [`Wav2Vec2ForCTC`].
layerdrop (`float`, *optional*, defaults to 0.1):
The LayerDrop probability. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more
details.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
feat_extract_norm (`str`, *optional*, defaults to `"group"`):
The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
convolutional layers.
feat_proj_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for output of the feature encoder.
feat_proj_layer_norm (`bool`, *optional*, defaults to `True`):
Whether to apply LayerNorm to the output of the feature encoder.
feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the 1D convolutional layers of the feature
extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
conv_dim (`tuple[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
conv_stride (`tuple[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
conv_kernel (`tuple[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
length of *conv_kernel* defines the number of convolutional layers and has to match the length of
*conv_dim*.
conv_bias (`bool`, *optional*, defaults to `False`):
Whether the 1D convolutional layers have a bias.
num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
embeddings layer.
num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
Number of groups of 1D convolutional positional embeddings layer.
conv_pos_batch_norm (`bool`, *optional*, defaults to `False`):
Whether to use batch norm instead of weight norm in `conv_pos`.
do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
Whether to apply the *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
False` corresponds to applying layer norm after the attention layer.
apply_spec_augment (`bool`, *optional*, defaults to `True`):
Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
[SpecAugment: A Simple Data Augmentation Method for Automatic Speech
Recognition](https://huggingface.co/papers/1904.08779).
mask_time_prob (`float`, *optional*, defaults to 0.05):
Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If
reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
mask_time_length (`int`, *optional*, defaults to 10):
Length of vector span along the time axis.
mask_time_min_masks (`int`, *optional*, defaults to 2):
The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
irrespective of `mask_time_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length <
mask_time_min_masks`.
mask_feature_prob (`float`, *optional*, defaults to 0.0):
Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks over
the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
True`.
mask_feature_length (`int`, *optional*, defaults to 10):
Length of vector span along the feature axis.
mask_feature_min_masks (`int`, *optional*, defaults to 0):
The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
step, irrespective of `mask_feature_prob`. Only relevant if
`mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
instance of [`HubertForCTC`].
ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
of [`HubertForCTC`].
use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
instance of [`HubertForSequenceClassification`].
classifier_proj_size (`int`, *optional*, defaults to 256):
Dimensionality of the projection before token mean-pooling for classification.
Example:
```python
>>> from transformers import HubertModel, HubertConfig
>>> # Initializing a Hubert facebook/hubert-base-ls960 style configuration
>>> configuration = HubertConfig()
>>> # Initializing a model from the facebook/hubert-base-ls960 style configuration
>>> model = HubertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'hubert'
def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_layer_norm=True, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-05, feat_extract_norm='group', feat_extract_activation='gelu', conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, conv_pos_batch_norm=False, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction='sum', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim)
self.conv_stride = list(conv_stride)
self.conv_kernel = list(conv_kernel)
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.conv_pos_batch_norm = conv_pos_batch_norm
self.num_feat_extract_layers = len(self.conv_dim)
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_layer_norm = feat_proj_layer_norm
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout
self.layerdrop = layerdrop
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.do_stable_layer_norm = do_stable_layer_norm
self.use_weighted_layer_sum = use_weighted_layer_sum
self.classifier_proj_size = classifier_proj_size
if len(self.conv_stride) != self.num_feat_extract_layers or len(self.conv_kernel) != self.num_feat_extract_layers or len(self.conv_dim) != self.num_feat_extract_layers:
raise ValueError(f'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
@property
def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul, self.conv_stride, 1)
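A worked example of the `inputs_to_logits_ratio` property above, using the default `conv_stride`; the 16 kHz interpretation is an assumption about typical Hubert inputs.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default strides from the signature above
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320 -> one output frame per 320 input samples (20 ms at 16 kHz)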
| null | 4
| 1
| 50
| 2
| 47
| 1
| 2
| 1.29
| 1
| 3
| 0
| 0
| 2
| 37
| 2
| 2
| 236
| 14
| 97
| 84
| 51
| 125
| 45
| 41
| 42
| 2
| 1
| 1
| 3
|
2,982
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py
|
transformers.models.hubert.modeling_hubert.HubertAttention
|
from .configuration_hubert import HubertConfig
from ...processing_utils import Unpack
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from typing import Callable, Optional, Union
from ...modeling_flash_attention_utils import FlashAttentionKwargs
import torch.nn as nn
class HubertAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[HubertConfig]=None):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.is_causal = is_causal
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
current_states = key_value_states if is_cross_attention else hidden_states
key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2)
value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights, None)
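A hedged sketch of what the eager attention path computes on the projected states above (the real `eager_attention_forward` is defined elsewhere in this file and also handles masks and dropout); shapes are illustrative.
import torch

bsz, num_heads, seq_len, head_dim = 2, 12, 10, 64
scaling = head_dim ** -0.5

query = torch.randn(bsz, num_heads, seq_len, head_dim)
key = torch.randn(bsz, num_heads, seq_len, head_dim)
value = torch.randn(bsz, num_heads, seq_len, head_dim)

# Scaled dot-product attention, written out step by step.
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
attn_weights = torch.softmax(attn_weights, dim=-1)
attn_output = torch.matmul(attn_weights, value)
print(attn_output.shape)  # torch.Size([2, 12, 10, 64])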
|
class HubertAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[HubertConfig]=None):
pass
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 3
| 2
| 50
| 7
| 35
| 8
| 5
| 0.24
| 1
| 7
| 1
| 2
| 3
| 12
| 3
| 13
| 156
| 23
| 107
| 44
| 86
| 26
| 68
| 27
| 64
| 12
| 1
| 2
| 15
|
2,983
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py
|
transformers.models.hubert.modeling_hubert.HubertAttnAdapterLayer
|
import torch.nn as nn
import torch
class HubertAttnAdapterLayer(nn.Module):
def __init__(self, config):
"""
Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
up training throughput.
"""
super().__init__()
self.input_dim = config.adapter_attn_dim
self.hidden_dim = config.hidden_size
self.norm = nn.LayerNorm(self.hidden_dim)
self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim)
self.act_fn = nn.ReLU()
self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim)
def forward(self, hidden_states: torch.FloatTensor):
hidden_states = self.norm(hidden_states)
hidden_states = self.linear_1(hidden_states)
hidden_states = self.act_fn(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
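The same bottleneck, rebuilt as a throwaway `nn.Sequential` with assumed dimensions, to show the down-project/ReLU/up-project shape flow.
import torch
from torch import nn

hidden_dim, adapter_dim = 768, 16  # assumed config.hidden_size / config.adapter_attn_dim
adapter = nn.Sequential(
    nn.LayerNorm(hidden_dim),
    nn.Linear(hidden_dim, adapter_dim),
    nn.ReLU(),
    nn.Linear(adapter_dim, hidden_dim),
)
hidden_states = torch.randn(2, 50, hidden_dim)
print(adapter(hidden_states).shape)  # torch.Size([2, 50, 768])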
|
class HubertAttnAdapterLayer(nn.Module):
def __init__(self, config):
'''
Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
up training throughput.
'''
pass
def forward(self, hidden_states: torch.FloatTensor):
pass
| 3
| 1
| 11
| 2
| 7
| 2
| 1
| 0.27
| 1
| 1
| 0
| 0
| 2
| 6
| 2
| 12
| 23
| 4
| 15
| 9
| 12
| 4
| 15
| 9
| 12
| 1
| 1
| 0
| 2
|
2,984
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py
|
transformers.models.hubert.modeling_hubert.HubertEncoder
|
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
import torch.nn as nn
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
from ...integrations.fsdp import is_fsdp_managed_module
import torch
from typing import Callable, Optional, Union
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...integrations.flex_attention import make_flex_block_causal_mask  # used in _update_full_mask; guarded by is_torch_flex_attn_available() in the full file
class HubertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = HubertPositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList([HubertEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_attention_mask] = 0
attention_mask = self._update_full_mask(attention_mask, hidden_states)
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.config.layerdrop
if not skip_the_layer or synced_gpus:
layer_outputs = layer(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
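A standalone sketch of the LayerDrop rule in the forward pass above: during training, each layer is skipped with probability `config.layerdrop`; the values here are assumed.
import torch

layerdrop, num_layers, training = 0.1, 12, True

torch.manual_seed(0)
for i in range(num_layers):
    dropout_probability = torch.rand([])
    skip_the_layer = training and dropout_probability < layerdrop
    print(f"layer {i}: {'skipped' if skip_the_layer else 'run'}")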
|
class HubertEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True):
pass
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
pass
| 4
| 0
| 41
| 5
| 33
| 3
| 8
| 0.07
| 1
| 8
| 3
| 0
| 2
| 7
| 2
| 12
| 83
| 11
| 67
| 26
| 57
| 5
| 45
| 19
| 42
| 15
| 1
| 3
| 16
|
2,985
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py
|
transformers.models.hubert.modeling_hubert.HubertEncoderLayer
|
import torch.nn as nn
from ...modeling_layers import GradientCheckpointingLayer
class HubertEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = HubertAttention(embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, config=config)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = HubertFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
class HubertEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
pass
| 3
| 0
| 16
| 3
| 13
| 0
| 2
| 0
| 1
| 2
| 1
| 0
| 2
| 5
| 2
| 12
| 33
| 6
| 27
| 11
| 24
| 0
| 20
| 11
| 17
| 2
| 1
| 1
| 3
|
2,986
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py
|
transformers.models.hubert.modeling_hubert.HubertEncoderLayerStableLayerNorm
|
import torch
import torch.nn as nn
from typing import Callable, Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
class HubertEncoderLayerStableLayerNorm(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = HubertAttention(embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, config=config)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = HubertFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if getattr(config, 'adapter_attn_dim', None) is not None:
self.adapter_layer = HubertAttnAdapterLayer(config)
else:
self.adapter_layer = None
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False):
attn_residual = hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.attention(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
if self.adapter_layer is not None:
hidden_states = hidden_states + self.adapter_layer(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
class HubertEncoderLayerStableLayerNorm(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False):
pass
| 3
| 0
| 21
| 3
| 18
| 0
| 3
| 0
| 1
| 5
| 2
| 0
| 2
| 6
| 2
| 12
| 43
| 6
| 37
| 17
| 29
| 0
| 24
| 12
| 21
| 3
| 1
| 1
| 5
|
2,987
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py
|
transformers.models.hubert.modeling_hubert.HubertEncoderStableLayerNorm
|
from ...integrations.fsdp import is_fsdp_managed_module
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
import torch
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from typing import Callable, Optional, Union
import torch.nn as nn
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
from ...integrations.flex_attention import make_flex_block_causal_mask  # used in _update_full_mask; guarded by is_torch_flex_attn_available() in the full file
class HubertEncoderStableLayerNorm(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = HubertPositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList([HubertEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_attention_mask] = 0
attention_mask = self._update_full_mask(attention_mask, hidden_states)
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.dropout(hidden_states)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.config.layerdrop
if not skip_the_layer or synced_gpus:
layer_outputs = layer(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
|
class HubertEncoderStableLayerNorm(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True):
pass
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
pass
| 4
| 0
| 43
| 6
| 34
| 3
| 8
| 0.09
| 1
| 6
| 3
| 0
| 2
| 7
| 2
| 12
| 87
| 12
| 69
| 27
| 59
| 6
| 45
| 19
| 42
| 15
| 1
| 3
| 16
|
2,988
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py
|
transformers.models.hubert.modeling_hubert.HubertFeatureEncoder
|
import torch.nn as nn
class HubertFeatureEncoder(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
if config.feat_extract_norm == 'group':
conv_layers = [HubertGroupNormConvLayer(config, layer_id=0)] + [HubertNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)]
elif config.feat_extract_norm == 'layer':
conv_layers = [HubertLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
else:
raise ValueError(f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']")
self.conv_layers = nn.ModuleList(conv_layers)
self.gradient_checkpointing = False
self._requires_grad = True
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
hidden_states = conv_layer(hidden_states)
return hidden_states
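A back-of-the-envelope sketch (not from this file) of how the default conv stack downsamples a waveform; each layer maps a length L to floor((L - kernel) / stride) + 1, mirroring what the model's `_get_feat_extract_output_lengths` computes.
conv_kernel = (10, 3, 3, 3, 3, 2, 2)
conv_stride = (5, 2, 2, 2, 2, 2, 2)

length = 16000  # one second of 16 kHz audio (assumed sampling rate)
for kernel, stride in zip(conv_kernel, conv_stride):
    length = (length - kernel) // stride + 1
print(length)  # 49 output frames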
|
class HubertFeatureEncoder(nn.Module):
'''Construct the features from raw audio waveform'''
def __init__(self, config):
pass
def _freeze_parameters(self):
pass
def forward(self, input_values):
pass
| 4
| 1
| 12
| 1
| 11
| 0
| 3
| 0.06
| 1
| 6
| 3
| 1
| 3
| 3
| 3
| 13
| 42
| 7
| 33
| 11
| 29
| 2
| 23
| 11
| 19
| 4
| 1
| 2
| 9
|
2,989
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py
|
transformers.models.hubert.modeling_hubert.HubertFeatureProjection
|
import torch.nn as nn
class HubertFeatureProjection(nn.Module):
def __init__(self, config):
super().__init__()
self.feat_proj_layer_norm = config.feat_proj_layer_norm
if self.feat_proj_layer_norm:
self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
self.dropout = nn.Dropout(config.feat_proj_dropout)
def forward(self, hidden_states):
if self.feat_proj_layer_norm:
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class HubertFeatureProjection(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 7
| 0
| 7
| 1
| 2
| 0.07
| 1
| 1
| 0
| 0
| 2
| 4
| 2
| 12
| 16
| 1
| 14
| 7
| 11
| 1
| 14
| 7
| 11
| 2
| 1
| 1
| 4
|
2,990
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py
|
transformers.models.hubert.modeling_hubert.HubertFeedForward
|
from ...activations import ACT2FN
import torch.nn as nn
class HubertFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate_dropout = nn.Dropout(config.activation_dropout)
self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.output_dropout = nn.Dropout(config.hidden_dropout)
def forward(self, hidden_states):
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states)
return hidden_states
|
class HubertFeedForward(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 10
| 2
| 9
| 0
| 2
| 0
| 1
| 2
| 0
| 0
| 2
| 5
| 2
| 12
| 22
| 4
| 18
| 8
| 15
| 0
| 17
| 8
| 14
| 2
| 1
| 1
| 3
|
2,991
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py
|
transformers.models.hubert.modeling_hubert.HubertForCTC
|
import torch.nn as nn
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
import torch
import warnings
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n Hubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n ')
class HubertForCTC(HubertPreTrainedModel):
def __init__(self, config, target_lang: Optional[str]=None):
"""
target_lang (`str`, *optional*):
Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or
adapter.<lang>.bin. Only relevant when using an instance of [`HubertForCTC`] with adapters. Uses 'eng' by
default.
"""
super().__init__(config)
self.hubert = HubertModel(config)
self.dropout = nn.Dropout(config.final_dropout)
self.target_lang = target_lang
if config.vocab_size is None:
raise ValueError(f"You are trying to instantiate {self.__class__} with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `HubertForCTC.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your model's configuration.")
output_hidden_size = config.output_hidden_size if hasattr(config, 'add_adapter') and config.add_adapter else config.hidden_size
self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
self.post_init()
def tie_weights(self):
"""
This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
passing `target_lang=...` to `from_pretrained(...)`.
This method is **not** supposed to be called by the user and may change in the future.
"""
target_lang = self.target_lang
if target_lang is not None and getattr(self.config, 'adapter_attn_dim', None) is None:
raise ValueError(f'Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.')
elif target_lang is None and getattr(self.config, 'adapter_attn_dim', None) is not None:
logger.info("By default `target_lang` is set to 'eng'.")
elif target_lang is not None:
self.load_adapter(target_lang, force_load=True)
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.hubert.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.hubert.parameters():
param.requires_grad = False
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, CausalLMOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None and labels.max() >= self.config.vocab_size:
raise ValueError(f'Label values must be <= vocab_size: {self.config.vocab_size}')
outputs = self.hubert(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
attention_mask = attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)
flattened_targets = labels.masked_select(labels_mask)
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
with torch.backends.cudnn.flags(enabled=False):
loss = nn.functional.ctc_loss(log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity)
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return (loss,) + output if loss is not None else output
return CausalLMOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
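A minimal standalone replay of the CTC loss computation in the forward pass above, with made-up shapes and no `-100` padding in the labels; `nn.functional.ctc_loss` expects log-probs of shape `(time, batch, vocab)`.
import torch
from torch import nn

vocab_size, pad_token_id = 32, 0
logits = torch.randn(2, 49, vocab_size)         # (batch, time, vocab)
labels = torch.randint(1, vocab_size, (2, 12))  # all labels valid, none masked

input_lengths = torch.tensor([49, 49])
target_lengths = torch.tensor([12, 12])
flattened_targets = labels.flatten()            # labels_mask would be all True here

log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
loss = nn.functional.ctc_loss(
    log_probs, flattened_targets, input_lengths, target_lengths,
    blank=pad_token_id, reduction="sum", zero_infinity=False,
)
print(loss)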
|
@auto_docstring(custom_intro='\n Hubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n ')
class HubertForCTC(HubertPreTrainedModel):
def __init__(self, config, target_lang: Optional[str]=None):
'''
target_lang (`str`, *optional*):
Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or
adapter.<lang>.bin. Only relevant when using an instance of [`HubertForCTC`] with adapters. Uses 'eng' by
default.
'''
pass
def tie_weights(self):
'''
This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
passing `target_lang=...` to `from_pretrained(...)`.
This method is **not** supposed to be called by the user and may change in the future.
'''
pass
def freeze_feature_extractor(self):
'''
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
'''
pass
def freeze_feature_encoder(self):
'''
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
'''
pass
def freeze_base_model(self):
'''
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
'''
pass
@auto_docstring
def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None) -> Union[tuple, CausalLMOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size - 1]`.
'''
pass
total_program_units: 9 | total_doc_str: 6 | AvgCountLine: 23 | AvgCountLineBlank: 3 | AvgCountLineCode: 14 | AvgCountLineComment: 6 | AvgCyclomatic: 3 | CommentToCodeRatio: 0.35 | CountClassBase: 1 | CountClassCoupled: 8 | CountClassCoupledModified: 2 | CountClassDerived: 0 | CountDeclInstanceMethod: 6 | CountDeclInstanceVariable: 4 | CountDeclMethod: 6 | CountDeclMethodAll: 9 | CountLine: 149 | CountLineBlank: 22 | CountLineCode: 94 | CountLineCodeDecl: 33 | CountLineCodeExe: 71 | CountLineComment: 33 | CountStmt: 47 | CountStmtDecl: 24 | CountStmtExe: 40 | MaxCyclomatic: 7 | MaxInheritanceTree: 2 | MaxNesting: 2 | SumCyclomatic: 18
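A minimal, self-contained sketch of the label bookkeeping described in the `labels` docstring above; all shapes, the blank id, and the reduction are illustrative assumptions, not values taken from this file:

```python
import torch
import torch.nn as nn

logits = torch.randn(2, 50, 32)  # (batch, frames, vocab) -- illustrative sizes
labels = torch.tensor([[7, 4, 9, -100, -100], [3, 3, 12, 5, 1]])  # padded with -100

labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)                  # tensor([3, 5])
flattened_targets = labels.masked_select(labels_mask)

# ctc_loss wants (time, batch, vocab) log-probabilities in fp32
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
input_lengths = torch.tensor([50, 50])                # every frame valid in this sketch

loss = nn.functional.ctc_loss(log_probs, flattened_targets, input_lengths, target_lengths, blank=0, reduction="sum", zero_infinity=True)
print(loss)
```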
|
id: 2,992 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py | class_name: transformers.models.hubert.modeling_hubert.HubertForSequenceClassification
|
import warnings
from typing import Callable, Optional, Union

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
from ...utils import auto_docstring, is_torch_flex_attn_available, logging


@auto_docstring(custom_intro='\n Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n ')
class HubertForSequenceClassification(HubertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        if hasattr(config, 'add_adapter') and config.add_adapter:
            raise ValueError('Sequence classification does not support the use of Hubert adapters (config.add_adapter=True)')
        self.hubert = HubertModel(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)

        self.post_init()

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
        not be updated during training.
        """
        warnings.warn('The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.', FutureWarning)
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.hubert.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.hubert.parameters():
            param.requires_grad = False

    @auto_docstring
    def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None) -> Union[tuple, SequenceClassifierOutput]:
        """
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
            into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
            (`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
            To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
            into a tensor of type `torch.FloatTensor`. See [`HubertProcessor.__call__`] for details.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states

        outputs = self.hubert(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)

        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]

        hidden_states = self.projector(hidden_states)
        if attention_mask is None:
            pooled_output = hidden_states.mean(dim=1)
        else:
            # zero out padded frames, then average over the real ones only
            padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
            expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_padding_mask] = 0.0
            pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n ')
class HubertForSequenceClassification(HubertPreTrainedModel):
    def __init__(self, config):
        pass

    def freeze_feature_extractor(self):
        '''
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
        not be updated during training.
        '''
        pass

    def freeze_feature_encoder(self):
        '''
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        '''
        pass

    def freeze_base_model(self):
        '''
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        '''
        pass

    @auto_docstring
    def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None) -> Union[tuple, SequenceClassifierOutput]:
        '''
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
            into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
            (`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
            To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
            into a tensor of type `torch.FloatTensor`. See [`HubertProcessor.__call__`] for details.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        '''
        pass
total_program_units: 8 | total_doc_str: 4 | AvgCountLine: 20 | AvgCountLineBlank: 2 | AvgCountLineCode: 14 | AvgCountLineComment: 4 | AvgCyclomatic: 3 | CommentToCodeRatio: 0.24 | CountClassBase: 1 | CountClassCoupled: 7 | CountClassCoupledModified: 2 | CountClassDerived: 0 | CountDeclInstanceMethod: 5 | CountDeclInstanceVariable: 4 | CountDeclMethod: 5 | CountDeclMethodAll: 8 | CountLine: 115 | CountLineBlank: 14 | CountLineCode: 82 | CountLineCodeDecl: 31 | CountLineCodeExe: 59 | CountLineComment: 20 | CountStmt: 46 | CountStmtDecl: 22 | CountStmtExe: 40 | MaxCyclomatic: 8 | MaxInheritanceTree: 2 | MaxNesting: 1 | SumCyclomatic: 15
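The `use_weighted_layer_sum` branch in the `forward` above collapses the per-layer hidden states with learned softmax weights. A standalone sketch with made-up sizes (13 layers = 12 transformer layers plus the input embeddings):

```python
import torch
import torch.nn as nn

num_layers, batch, seq, hidden = 13, 2, 50, 768  # illustrative sizes
hidden_states = torch.stack([torch.randn(batch, seq, hidden) for _ in range(num_layers)], dim=1)

layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)  # learned in the real model
norm_weights = nn.functional.softmax(layer_weights, dim=-1)

# Weighted sum over the layer axis -> (batch, seq, hidden)
pooled_layers = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
print(pooled_layers.shape)  # torch.Size([2, 50, 768])
```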
|
id: 2,993 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py | class_name: transformers.models.hubert.modeling_hubert.HubertGroupNormConvLayer
|
import torch.nn as nn

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer


class HubertGroupNormConvLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias)
        self.activation = ACT2FN[config.feat_extract_activation]
        self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
|
class HubertGroupNormConvLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_id=0):
        pass

    def forward(self, hidden_states):
        pass
total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 10 | AvgCountLineBlank: 1 | AvgCountLineCode: 9 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 5 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 22 | CountLineBlank: 3 | CountLineCode: 19 | CountLineCodeDecl: 8 | CountLineCodeExe: 16 | CountLineComment: 0 | CountStmt: 13 | CountStmtDecl: 8 | CountStmtExe: 10 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 3
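A side note on the `nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim)` choice above: one group per channel means each channel is normalized independently over its own time axis. A quick numeric check (sizes are illustrative):

```python
import torch
import torch.nn as nn

channels, time = 4, 10
x = torch.randn(1, channels, time)

# One group per channel: per-channel normalization over time
gn = nn.GroupNorm(num_groups=channels, num_channels=channels, affine=True)
y = gn(x)
print(y.mean(dim=-1))                  # ~0 for each channel
print(y.std(dim=-1, unbiased=False))   # ~1 for each channel
```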
|
id: 2,994 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py | class_name: transformers.models.hubert.modeling_hubert.HubertLayerNormConvLayer
|
import torch.nn as nn

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer


class HubertLayerNormConvLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias)
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)

        # LayerNorm normalizes the last dimension, so move channels there and back
        hidden_states = hidden_states.transpose(-2, -1)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(-2, -1)

        hidden_states = self.activation(hidden_states)
        return hidden_states
|
class HubertLayerNormConvLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_id=0):
        pass

    def forward(self, hidden_states):
        pass
total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 12 | AvgCountLineBlank: 2 | AvgCountLineCode: 10 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 5 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 25 | CountLineBlank: 4 | CountLineCode: 21 | CountLineCodeDecl: 8 | CountLineCodeExe: 18 | CountLineComment: 0 | CountStmt: 15 | CountStmtDecl: 8 | CountStmtExe: 12 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 3
|
id: 2,995 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py | class_name: transformers.models.hubert.modeling_hubert.HubertModel
|
from typing import Callable, Optional, Union

import torch
import torch.nn as nn

from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from .configuration_hubert import HubertConfig


@auto_docstring
class HubertModel(HubertPreTrainedModel):
    def __init__(self, config: HubertConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = HubertFeatureEncoder(config)
        self.feature_projection = HubertFeatureProjection(config)

        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())

        if config.do_stable_layer_norm:
            self.encoder = HubertEncoderStableLayerNorm(config)
        else:
            self.encoder = HubertEncoder(config)

        self.post_init()

    def _mask_hidden_states(self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None):
        """
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://huggingface.co/papers/1904.08779).
        """
        # `config.apply_spec_augment` can turn masking off entirely
        if not getattr(self.config, 'apply_spec_augment', True):
            return hidden_states

        batch_size, sequence_length, hidden_size = hidden_states.size()

        if mask_time_indices is not None:
            # apply SpecAugment along time axis with the given mask_time_indices
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        elif self.config.mask_time_prob > 0 and self.training:
            # generate indices & apply SpecAugment along time axis
            mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks)
            mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)

        if self.config.mask_feature_prob > 0 and self.training:
            # generate indices & apply SpecAugment along feature axis
            mask_feature_indices = _compute_mask_indices((batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks)
            mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
            mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
            hidden_states[mask_feature_indices] = 0

        return hidden_states

    @auto_docstring
    def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> Union[tuple, BaseModelOutput]:
        """
        mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
            masked extracted features in *config.proj_codevector_dim* space.

        Example:

        ```python
        >>> from transformers import AutoProcessor, HubertModel
        >>> from datasets import load_dataset

        >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
        >>> model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")

        >>> def map_to_array(example):
        ...     example["speech"] = example["audio"]["array"]
        ...     return example

        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.map(map_to_array)

        >>> input_values = processor(ds["speech"][0], return_tensors="pt").input_values  # Batch size 1
        >>> hidden_states = model(input_values).last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        extract_features = self.feature_extractor(input_values)
        extract_features = extract_features.transpose(1, 2)

        if attention_mask is not None:
            # compute reduced attention_mask corresponding to feature vectors
            attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)

        hidden_states = self.feature_projection(extract_features)
        hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)

        encoder_outputs = self.encoder(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = encoder_outputs[0]

        if not return_dict:
            return (hidden_states,) + encoder_outputs[1:]

        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring
class HubertModel(HubertPreTrainedModel):
    def __init__(self, config: HubertConfig):
        pass

    def _mask_hidden_states(self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None):
        '''
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://huggingface.co/papers/1904.08779).
        '''
        pass

    @auto_docstring
    def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> Union[tuple, BaseModelOutput]:
        '''
        mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
            masked extracted features in *config.proj_codevector_dim* space.

        Example:

        ```python
        >>> from transformers import AutoProcessor, HubertModel
        >>> from datasets import load_dataset

        >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
        >>> model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")

        >>> def map_to_array(example):
        ...     example["speech"] = example["audio"]["array"]
        ...     return example

        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.map(map_to_array)

        >>> input_values = processor(ds["speech"][0], return_tensors="pt").input_values  # Batch size 1
        >>> hidden_states = model(input_values).last_hidden_state
        ```'''
        pass
total_program_units: 6 | total_doc_str: 2 | AvgCountLine: 44 | AvgCountLineBlank: 8 | AvgCountLineCode: 26 | AvgCountLineComment: 9 | AvgCyclomatic: 5 | CommentToCodeRatio: 0.35 | CountClassBase: 1 | CountClassCoupled: 9 | CountClassCoupledModified: 6 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 5 | CountDeclMethod: 3 | CountDeclMethodAll: 6 | CountLine: 137 | CountLineBlank: 26 | CountLineCode: 82 | CountLineCodeDecl: 28 | CountLineCodeExe: 63 | CountLineComment: 29 | CountStmt: 42 | CountStmtDecl: 14 | CountStmtExe: 38 | MaxCyclomatic: 6 | MaxInheritanceTree: 2 | MaxNesting: 1 | SumCyclomatic: 14
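`_mask_hidden_states` above delegates span sampling to `_compute_mask_indices`, which is defined elsewhere in the file. A simplified sketch of the same idea — contiguous time spans masked and replaced by a learned embedding — assuming uniform start positions and no attention mask (the real helper is considerably more careful about overlap and minimum mask counts):

```python
import torch

def simple_time_mask(batch_size, seq_len, mask_prob=0.05, mask_length=10):
    """Mask roughly mask_prob of all positions, in contiguous spans of mask_length."""
    num_spans = max(1, int(mask_prob * seq_len / mask_length))
    mask = torch.zeros(batch_size, seq_len, dtype=torch.bool)
    for b in range(batch_size):
        starts = torch.randint(0, seq_len - mask_length + 1, (num_spans,))
        for s in starts:
            mask[b, s : s + mask_length] = True
    return mask

hidden_states = torch.randn(2, 100, 8)   # (batch, time, hidden) -- illustrative sizes
masked_spec_embed = torch.randn(8)       # a learned parameter in the real model
mask = simple_time_mask(2, 100)
hidden_states[mask] = masked_spec_embed  # same assignment as in _mask_hidden_states above
```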
|
id: 2,996 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py | class_name: transformers.models.hubert.modeling_hubert.HubertNoLayerNormConvLayer
|
import torch.nn as nn

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer


class HubertNoLayerNormConvLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
|
class HubertNoLayerNormConvLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_id=0):
        pass

    def forward(self, hidden_states):
        pass
total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 9 | AvgCountLineBlank: 1 | AvgCountLineCode: 8 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 4 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 19 | CountLineBlank: 2 | CountLineCode: 17 | CountLineCodeDecl: 7 | CountLineCodeExe: 14 | CountLineComment: 0 | CountStmt: 11 | CountStmtDecl: 7 | CountStmtExe: 8 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 3
|
id: 2,997 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py | class_name: transformers.models.hubert.modeling_hubert.HubertPositionalConvEmbedding
|
import torch.nn as nn

from ...activations import ACT2FN
from ...integrations.deepspeed import is_deepspeed_zero3_enabled


class HubertPositionalConvEmbedding(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups)

        self.batch_norm = None
        if config.conv_pos_batch_norm:
            self.batch_norm = nn.BatchNorm1d(config.hidden_size)
        else:
            weight_norm = nn.utils.weight_norm
            if hasattr(nn.utils.parametrizations, 'weight_norm'):
                weight_norm = nn.utils.parametrizations.weight_norm

            if is_deepspeed_zero3_enabled():
                import deepspeed

                with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                    self.conv = weight_norm(self.conv, name='weight', dim=2)
                if hasattr(self.conv, 'parametrizations'):
                    weight_g = self.conv.parametrizations.weight.original0
                    weight_v = self.conv.parametrizations.weight.original1
                else:
                    weight_g = self.conv.weight_g
                    weight_v = self.conv.weight_v
                deepspeed.zero.register_external_parameter(self, weight_v)
                deepspeed.zero.register_external_parameter(self, weight_g)
            else:
                self.conv = weight_norm(self.conv, name='weight', dim=2)

        self.padding = HubertSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = hidden_states.transpose(1, 2)

        if self.batch_norm is not None:
            hidden_states = self.batch_norm(hidden_states)
        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states
|
class HubertPositionalConvEmbedding(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states):
        pass
total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 23 | AvgCountLineBlank: 3 | AvgCountLineCode: 21 | AvgCountLineComment: 0 | AvgCyclomatic: 4 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 4 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 48 | CountLineBlank: 6 | CountLineCode: 42 | CountLineCodeDecl: 11 | CountLineCodeExe: 38 | CountLineComment: 0 | CountStmt: 33 | CountStmtDecl: 11 | CountStmtExe: 29 | MaxCyclomatic: 5 | MaxInheritanceTree: 1 | MaxNesting: 3 | SumCyclomatic: 7
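The deepspeed branch above has to support both the legacy `weight_g`/`weight_v` attributes and the newer parametrizations API. A small sketch of where the decomposed weights live after `nn.utils.parametrizations.weight_norm` (assumes a PyTorch version that ships the parametrizations API; channel counts are illustrative):

```python
import torch.nn as nn

conv = nn.Conv1d(16, 16, kernel_size=3)
conv = nn.utils.parametrizations.weight_norm(conv, name="weight", dim=2)

# The parametrizations API stores the decomposed tensors as original0/original1,
# which is what the hasattr(self.conv, 'parametrizations') branch above reads.
weight_g = conv.parametrizations.weight.original0  # magnitude
weight_v = conv.parametrizations.weight.original1  # direction
print(weight_g.shape, weight_v.shape)
```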
|
id: 2,998 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py | class_name: transformers.models.hubert.modeling_hubert.HubertPreTrainedModel
|
from typing import Callable, Optional, Union

import torch
import torch.nn as nn

from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from .configuration_hubert import HubertConfig


@auto_docstring
class HubertPreTrainedModel(PreTrainedModel):
    config: HubertConfig
    base_model_prefix = 'hubert'
    main_input_name = 'input_values'
    supports_gradient_checkpointing = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm1d)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            if is_deepspeed_zero3_enabled():
                import deepspeed

                if hasattr(module, 'weight_v') and hasattr(module, 'weight_g'):
                    with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
                        nn.init.kaiming_normal_(module.weight.data)
                else:
                    with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
                        nn.init.kaiming_normal_(module.weight.data)
            else:
                nn.init.kaiming_normal_(module.weight.data)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, HubertModel):
            if hasattr(module, 'masked_spec_embed'):
                module.masked_spec_embed.data.uniform_()
        elif isinstance(module, HubertForSequenceClassification):
            if hasattr(module, 'layer_weights'):
                module.layer_weights.data.fill_(1.0 / (self.config.num_hidden_layers + 1))

    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        """
        Computes the output length of the convolutional layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D conv output length formula: floor((L - kernel) / stride) + 1
            return torch.div(input_length - kernel_size, stride, rounding_mode='floor') + 1

        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)

        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
        batch_size = attention_mask.shape[0]
        attention_mask = torch.zeros((batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device)
        # set a 1 at the last valid frame, then back-fill so all earlier frames are attended to
        attention_mask[torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask
|
@auto_docstring
class HubertPreTrainedModel(PreTrainedModel):
    def _init_weights(self, module):
        '''Initialize the weights'''
        pass

    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        '''
        Computes the output length of the convolutional layers
        '''
        pass

    def _conv_out_length(input_length, kernel_size, stride):
        pass

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        pass
total_program_units: 6 | total_doc_str: 2 | AvgCountLine: 13 | AvgCountLineBlank: 2 | AvgCountLineCode: 9 | AvgCountLineComment: 3 | AvgCyclomatic: 3 | CommentToCodeRatio: 0.32 | CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 0 | CountClassDerived: 3 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 0 | CountDeclMethod: 3 | CountDeclMethodAll: 3 | CountLine: 64 | CountLineBlank: 10 | CountLineCode: 41 | CountLineCodeDecl: 15 | CountLineCodeExe: 35 | CountLineComment: 13 | CountStmt: 35 | CountStmtDecl: 15 | CountStmtExe: 29 | MaxCyclomatic: 7 | MaxInheritanceTree: 1 | MaxNesting: 4 | SumCyclomatic: 11
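`_get_feat_extract_output_lengths` above just folds the standard formula `floor((L - kernel) / stride) + 1` over the conv stack. A worked example using the usual HuBERT base feature-extractor kernels and strides (quoted from the default config as an assumption, since they are not part of this snippet):

```python
import torch

conv_kernel = (10, 3, 3, 3, 3, 2, 2)  # default HubertConfig values (assumed)
conv_stride = (5, 2, 2, 2, 2, 2, 2)

def feat_extract_output_lengths(input_lengths):
    for kernel_size, stride in zip(conv_kernel, conv_stride):
        input_lengths = torch.div(input_lengths - kernel_size, stride, rounding_mode="floor") + 1
    return input_lengths

# One second of 16 kHz audio -> 49 feature frames (~20 ms hop)
print(feat_extract_output_lengths(torch.tensor([16000])))  # tensor([49])
```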
|
id: 2,999 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/hubert/modeling_hubert.py | class_name: transformers.models.hubert.modeling_hubert.HubertSamePadLayer
|
import torch.nn as nn


class HubertSamePadLayer(nn.Module):
    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0

    def forward(self, hidden_states):
        if self.num_pad_remove > 0:
            hidden_states = hidden_states[:, :, :-self.num_pad_remove]
        return hidden_states
|
class HubertSamePadLayer(nn.Module):
    def __init__(self, num_conv_pos_embeddings):
        pass

    def forward(self, hidden_states):
        pass
total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 4 | AvgCountLineBlank: 0 | AvgCountLineCode: 4 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 1 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 9 | CountLineBlank: 1 | CountLineCode: 8 | CountLineCodeDecl: 4 | CountLineCodeExe: 5 | CountLineComment: 0 | CountStmt: 8 | CountStmtDecl: 4 | CountStmtExe: 5 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 4
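Why `num_pad_remove` is 1 for even kernels: with `padding = k // 2` on both sides, an even `k` yields `L + 2*(k // 2) - k + 1 = L + 1` output frames, one too many for "same" padding, which this layer trims. A quick check (channel count and kernel size are illustrative):

```python
import torch
import torch.nn as nn

seq_len, k = 100, 128  # even kernel, like a typical num_conv_pos_embeddings
conv = nn.Conv1d(4, 4, kernel_size=k, padding=k // 2)

out = conv(torch.randn(1, 4, seq_len))
print(out.shape[-1])      # 101 -> one extra frame

trimmed = out[:, :, :-1]  # exactly what HubertSamePadLayer removes
print(trimmed.shape[-1])  # 100
```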
|