Dataset schema (one row per class; for string columns Min/Max are string lengths, for numeric columns they are value ranges; ⌀ marks a nullable column):

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length) ⌀ | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
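
The numeric column names follow SciTools Understand-style static metrics, and some columns are derivable from others. A hedged sanity check in pandas, assuming the rows below were exported to a Parquet file (the file name is hypothetical):

```python
import pandas as pd

# Hypothetical export of the rows below; column names match the schema above.
df = pd.read_parquet("class_metrics.parquet")

# CommentToCodeRatio should be close to CountLineComment / CountLineCode
# (e.g. row 2,700: 3 / 38 ~= 0.08, matching the stored 0.08).
ratio = df["CountLineComment"] / df["CountLineCode"]
print((ratio - df["CommentToCodeRatio"]).abs().max())
```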

---

id: 2,700
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitLayer

human_written_code:

```python
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils.deprecation import deprecate_kwarg
from ...modeling_layers import GradientCheckpointingLayer
from ...cache_utils import Cache, DynamicCache
from typing import Callable, Optional, Union
import torch


class GitLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_idx=None):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = GitAttention(config, layer_idx=layer_idx)
        self.intermediate = GitIntermediate(config)
        self.output = GitOutput(config)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, pixel_values_present: Optional[bool]=False) -> tuple[torch.Tensor]:
        attention_output, self_attention_weights = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_values=past_key_values, pixel_values_present=pixel_values_present)
        layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
        return (layer_output, self_attention_weights)

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
```

class_skeleton:

```python
class GitLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_idx=None):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, pixel_values_present: Optional[bool]=False) -> tuple[torch.Tensor]:
        pass

    def feed_forward_chunk(self, attention_output):
        pass
```

Metrics: total_program_units=5 | total_doc_str=0 | AvgCountLine=15 | AvgCountLineBlank=1 | AvgCountLineCode=12 | AvgCountLineComment=1 | AvgCyclomatic=1 | CommentToCodeRatio=0.08 | CountClassBase=1 | CountClassCoupled=7 | CountClassCoupledModified=4 | CountClassDerived=0 | CountDeclInstanceMethod=3 | CountDeclInstanceVariable=5 | CountDeclMethod=3 | CountDeclMethodAll=13 | CountLine=47 | CountLineBlank=6 | CountLineCode=38 | CountLineCodeDecl=24 | CountLineCodeExe=26 | CountLineComment=3 | CountStmt=21 | CountStmtDecl=16 | CountStmtExe=17 | MaxCyclomatic=1 | MaxInheritanceTree=1 | MaxNesting=0 | SumCyclomatic=3
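
GitLayer defers its feed-forward to apply_chunking_to_forward, which splits the sequence dimension into chunks to bound peak activation memory. A minimal sketch of the idea (not the library function itself), assuming the chunk size divides the sequence length:

```python
import torch

def chunked_forward(fn, chunk_size, seq_dim, x):
    # When chunk_size > 0, run fn over slices of the sequence dimension and
    # concatenate, trading peak memory for extra kernel launches.
    if chunk_size == 0:
        return fn(x)
    chunks = x.split(chunk_size, dim=seq_dim)
    return torch.cat([fn(c) for c in chunks], dim=seq_dim)

fn = torch.nn.Linear(8, 8)            # a position-wise stand-in for the MLP
x = torch.randn(2, 6, 8)              # (batch, seq, hidden)
assert torch.allclose(chunked_forward(fn, 2, 1, x), fn(x), atol=1e-6)
```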

---

id: 2,701
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitModel

human_written_code:

````python
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPooling, CausalLMOutputWithPast
from ...cache_utils import Cache, DynamicCache
import torch
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging, torch_int
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
from torch import nn
from typing import Callable, Optional, Union


@auto_docstring(custom_intro='\n The bare GIT Model transformer consisting of a CLIP image encoder and text decoder outputting raw hidden-states\n ')
class GitModel(GitPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = GitEmbeddings(config)
        self.image_encoder = GitVisionModel(config.vision_config)
        self.encoder = GitEncoder(config)
        self.visual_projection = GitProjection(config)
        if config.num_image_with_embedding is not None:
            self.img_temporal_embedding = nn.ParameterList((nn.Parameter(torch.zeros(1, 1, config.vision_config.hidden_size)) for _ in range(config.num_image_with_embedding)))
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def _generate_future_mask(self, size: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
        # Strictly upper-triangular -inf mask: each position may attend to itself and earlier positions.
        mask = torch.triu(torch.ones(size, size, device=device, dtype=dtype), diagonal=1)
        mask = mask.masked_fill(mask == 1, float('-inf'))
        return mask

    def create_attention_mask(self, tgt, memory, tgt_mask, past_key_values_length, memory_key_padding_mask=None):
        # Block mask over [image tokens | text tokens]: image tokens attend only
        # among themselves (top row blocks), text tokens attend to all image
        # tokens and causally to text tokens (bottom row blocks).
        num_tgt = tgt.shape[1]
        num_memory = memory.shape[1]
        device = tgt.device
        dtype = tgt.dtype
        top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype)
        top_right = torch.full((num_memory, num_tgt + past_key_values_length), float('-inf'), device=tgt.device, dtype=dtype)
        bottom_left = torch.zeros((num_tgt, num_memory), dtype=dtype, device=tgt_mask.device)
        if past_key_values_length > 0:
            tgt_mask = torch.zeros((tgt_mask.shape[0], tgt_mask.shape[0] + past_key_values_length), dtype=dtype, device=tgt_mask.device)
        left = torch.cat((top_left, bottom_left), dim=0)
        right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0)
        full_attention_mask = torch.cat((left, right), dim=1)[None, :]
        if memory_key_padding_mask is None:
            memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device)
        if memory_key_padding_mask.dtype != torch.bool:
            raise ValueError('Memory key padding mask must be a boolean tensor.')
        zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype)
        zero_negative_infinity[memory_key_padding_mask] = float('-inf')
        full_attention_mask = full_attention_mask.expand((memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + past_key_values_length + num_tgt))
        full_attention_mask = full_attention_mask.clone()
        origin_left = full_attention_mask[:, :, :num_memory]
        update = zero_negative_infinity[:, None, :]
        full_attention_mask[:, :, :num_memory] = origin_left + update
        full_attention_mask = full_attention_mask[:, None, :, :]
        return full_attention_mask

    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]:
        """
        Examples:

        ```python
        >>> from transformers import AutoProcessor, AutoModel
        >>> import requests
        >>> from PIL import Image
        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
        >>> model = AutoModel.from_pretrained("microsoft/git-base")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text = "this is an image of two cats"
        >>> inputs = processor(images=image, text=text, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')
        seq_length = input_shape[1]
        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = past_key_values.get_seq_length()
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        projected_visual_features = None
        if pixel_values is not None:
            if pixel_values.ndim == 4:
                visual_features = self.image_encoder(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding).last_hidden_state
            elif pixel_values.ndim == 5:
                # Video input: encode each frame and add a learned temporal embedding.
                visual_features = []
                for frame_idx in range(pixel_values.shape[1]):
                    visual_features_frame = self.image_encoder(pixel_values[:, frame_idx, :, :], interpolate_pos_encoding=interpolate_pos_encoding).last_hidden_state
                    visual_features_frame += self.img_temporal_embedding[frame_idx]
                    visual_features.append(visual_features_frame)
                visual_features = torch.cat(visual_features, dim=1)
            else:
                raise ValueError('pixel_values must be of rank 4 or 5')
            projected_visual_features = self.visual_projection(visual_features)
        embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
        if projected_visual_features is None:
            projected_visual_features = torch.zeros((embedding_output.shape[0], 0, embedding_output.shape[2]), dtype=embedding_output.dtype, device=embedding_output.device)
        projected_visual_features = projected_visual_features.repeat(embedding_output.size(0) // projected_visual_features.size(0), 1, 1)
        hidden_states = torch.cat((projected_visual_features, embedding_output), dim=1)
        tgt_mask = self._generate_future_mask(seq_length, embedding_output.dtype, embedding_output.device)
        combined_attention_mask = self.create_attention_mask(tgt=embedding_output, memory=projected_visual_features, tgt_mask=tgt_mask, past_key_values_length=past_key_values_length)
        if attention_mask is not None:
            expanded_attn_mask = _prepare_4d_attention_mask(attention_mask, embedding_output.dtype, tgt_len=input_shape[-1]).to(embedding_output.device)
            if past_key_values_length > 0:
                expanded_attn_mask = expanded_attn_mask[:, :, -past_key_values_length:, :]
            else:
                combined_attention_mask[:, :, -input_shape[1]:, -input_shape[1]:] += expanded_attn_mask
        encoder_outputs = self.encoder(hidden_states, attention_mask=combined_attention_mask, head_mask=head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, pixel_values_present=pixel_values is not None)
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]
        return BaseModelOutputWithPast(last_hidden_state=sequence_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
````

class_skeleton:

````python
@auto_docstring(custom_intro='\n The bare GIT Model transformer consisting of a CLIP image encoder and text decoder outputting raw hidden-states\n ')
class GitModel(GitPreTrainedModel):
    def __init__(self, config):
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, value):
        pass

    def _prune_heads(self, heads_to_prune):
        '''
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        '''
        pass

    def _generate_future_mask(self, size: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
        pass

    def create_attention_mask(self, tgt, memory, tgt_mask, past_key_values_length, memory_key_padding_mask=None):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]:
        '''
        Examples:

        ```python
        >>> from transformers import AutoProcessor, AutoModel
        >>> import requests
        >>> from PIL import Image
        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
        >>> model = AutoModel.from_pretrained("microsoft/git-base")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text = "this is an image of two cats"
        >>> inputs = processor(images=image, text=text, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        ```'''
        pass
````

Metrics: total_program_units=10 | total_doc_str=2 | AvgCountLine=36 | AvgCountLineBlank=5 | AvgCountLineCode=25 | AvgCountLineComment=6 | AvgCyclomatic=4 | CommentToCodeRatio=0.24 | CountClassBase=1 | CountClassCoupled=14 | CountClassCoupledModified=7 | CountClassDerived=0 | CountDeclInstanceMethod=7 | CountDeclInstanceVariable=6 | CountDeclMethod=7 | CountDeclMethodAll=137 | CountLine=262 | CountLineBlank=43 | CountLineCode=176 | CountLineCodeDecl=59 | CountLineCodeExe=152 | CountLineComment=43 | CountStmt=93 | CountStmtDecl=43 | CountStmtExe=85 | MaxCyclomatic=18 | MaxInheritanceTree=3 | MaxNesting=3 | SumCyclomatic=29
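
A tiny numeric illustration of the block mask create_attention_mask assembles (no cache, no padding; the sizes are made up): image tokens see only image tokens, text tokens see all image tokens plus a causal window over text tokens.

```python
import torch

num_memory, num_tgt = 2, 3  # hypothetical: 2 image tokens, 3 text tokens
# Same as _generate_future_mask: -inf strictly above the diagonal.
tgt_mask = torch.triu(torch.full((num_tgt, num_tgt), float("-inf")), diagonal=1)
top_left = torch.zeros(num_memory, num_memory)
top_right = torch.full((num_memory, num_tgt), float("-inf"))
bottom_left = torch.zeros(num_tgt, num_memory)
left = torch.cat((top_left, bottom_left), dim=0)
right = torch.cat((top_right, tgt_mask), dim=0)
print(torch.cat((left, right), dim=1))
# tensor([[0., 0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf, -inf],
#         [0., 0., 0., -inf, -inf],
#         [0., 0., 0., 0., -inf],
#         [0., 0., 0., 0., 0.]])
```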

---

id: 2,702
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitOutput

human_written_code:

```python
import torch
from torch import nn


class GitOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
```

class_skeleton:

```python
class GitOutput(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        pass
```

Metrics: total_program_units=3 | total_doc_str=0 | AvgCountLine=5 | AvgCountLineBlank=0 | AvgCountLineCode=5 | AvgCountLineComment=0 | AvgCyclomatic=1 | CommentToCodeRatio=0 | CountClassBase=1 | CountClassCoupled=2 | CountClassCoupledModified=0 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=3 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=12 | CountLineBlank=1 | CountLineCode=11 | CountLineCodeDecl=6 | CountLineCodeExe=8 | CountLineComment=0 | CountStmt=11 | CountStmtDecl=6 | CountStmtExe=8 | MaxCyclomatic=1 | MaxInheritanceTree=1 | MaxNesting=0 | SumCyclomatic=2

---

id: 2,703
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitPreTrainedModel

human_written_code:

```python
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging, torch_int
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
from .configuration_git import GitConfig, GitVisionConfig


@auto_docstring
class GitPreTrainedModel(PreTrainedModel):
    config: GitConfig
    base_model_prefix = 'git'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, GitVisionEmbeddings):
            nn.init.normal_(module.class_embedding, mean=0.0, std=self.config.initializer_range)
            nn.init.normal_(module.patch_embedding.weight, std=self.config.initializer_range)
            nn.init.normal_(module.position_embedding.weight, std=self.config.initializer_range)
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
```

class_skeleton:

```python
@auto_docstring
class GitPreTrainedModel(PreTrainedModel):
    def _init_weights(self, module):
        '''Initialize the weights'''
        pass
```

Metrics: total_program_units=3 | total_doc_str=1 | AvgCountLine=19 | AvgCountLineBlank=0 | AvgCountLineCode=16 | AvgCountLineComment=3 | AvgCyclomatic=7 | CommentToCodeRatio=0.32 | CountClassBase=1 | CountClassCoupled=1 | CountClassCoupledModified=1 | CountClassDerived=3 | CountDeclInstanceMethod=1 | CountDeclInstanceVariable=0 | CountDeclMethod=1 | CountDeclMethodAll=130 | CountLine=31 | CountLineBlank=2 | CountLineCode=22 | CountLineCodeDecl=7 | CountLineCodeExe=20 | CountLineComment=7 | CountStmt=20 | CountStmtDecl=7 | CountStmtExe=18 | MaxCyclomatic=7 | MaxInheritanceTree=2 | MaxNesting=2 | SumCyclomatic=7
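
Every branch of _init_weights reduces to the same recipe: normally distributed weights with std self.config.initializer_range and zeroed biases. A toy illustration of that recipe on a bare nn.Linear, with 0.02 standing in for initializer_range:

```python
import torch
from torch import nn

linear = nn.Linear(64, 64)
linear.weight.data.normal_(mean=0.0, std=0.02)  # initializer_range stand-in
if linear.bias is not None:
    linear.bias.data.zero_()
print(float(linear.weight.std()))  # ~0.02
```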

---

id: 2,704
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitProjection

human_written_code:

```python
import torch
from torch import nn
from .configuration_git import GitConfig, GitVisionConfig


class GitProjection(nn.Module):
    def __init__(self, config: GitConfig):
        super().__init__()
        self.config = config
        self.visual_projection = nn.Sequential(nn.Linear(config.vision_config.hidden_size, config.hidden_size), nn.LayerNorm(config.hidden_size, eps=config.vision_config.layer_norm_eps))

    def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
        return self.visual_projection(embeddings)
```

class_skeleton:

```python
class GitProjection(nn.Module):
    def __init__(self, config: GitConfig):
        pass

    def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
        pass
```

Metrics: total_program_units=3 | total_doc_str=0 | AvgCountLine=5 | AvgCountLineBlank=0 | AvgCountLineCode=5 | AvgCountLineComment=0 | AvgCyclomatic=1 | CommentToCodeRatio=0 | CountClassBase=1 | CountClassCoupled=3 | CountClassCoupledModified=1 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=2 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=11 | CountLineBlank=1 | CountLineCode=10 | CountLineCodeDecl=5 | CountLineCodeExe=7 | CountLineComment=0 | CountStmt=7 | CountStmtDecl=5 | CountStmtExe=4 | MaxCyclomatic=1 | MaxInheritanceTree=1 | MaxNesting=0 | SumCyclomatic=2

---

id: 2,705
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitSelfAttention

human_written_code:

```python
import torch
from ...utils.deprecation import deprecate_kwarg
import math
from ...cache_utils import Cache, DynamicCache
from typing import Callable, Optional, Union
from torch import nn


class GitSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None, layer_idx=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
            raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # One CLS token plus a (image_size / patch_size)^2 patch grid per image.
        self.image_patch_tokens = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1)
        if config.num_image_with_embedding is not None:
            self.image_patch_tokens *= config.num_image_with_embedding
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
        if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, pixel_values_present: Optional[bool]=False) -> tuple[torch.Tensor]:
        batch_size, seq_length, _ = hidden_states.shape
        query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
        # Image tokens are re-encoded every step, so only text keys/values are cached.
        cutoff = self.image_patch_tokens if pixel_values_present else 0
        key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
        value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
        if past_key_values is not None:
            key_layer_past, value_layer_past = past_key_values.update(key_layer[:, :, cutoff:, :], value_layer[:, :, cutoff:, :], self.layer_idx)
            key_layer = torch.cat([key_layer[:, :, :cutoff, :], key_layer_past], dim=2)
            value_layer = torch.cat([value_layer[:, :, :cutoff, :], value_layer_past], dim=2)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
            query_length, key_length = (query_layer.shape[2], key_layer.shape[2])
            if past_key_values is not None:
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
            if self.position_embedding_type == 'relative_key':
                relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == 'relative_key_query':
                relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        return (context_layer, attention_probs)
```

class_skeleton:

```python
class GitSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None, layer_idx=None):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, pixel_values_present: Optional[bool]=False) -> tuple[torch.Tensor]:
        pass
```

Metrics: total_program_units=4 | total_doc_str=0 | AvgCountLine=37 | AvgCountLineBlank=6 | AvgCountLineCode=29 | AvgCountLineComment=3 | AvgCyclomatic=5 | CommentToCodeRatio=0.09 | CountClassBase=1 | CountClassCoupled=6 | CountClassCoupledModified=1 | CountClassDerived=0 | CountDeclInstanceMethod=3 | CountDeclInstanceVariable=12 | CountDeclMethod=3 | CountDeclMethodAll=13 | CountLine=115 | CountLineBlank=19 | CountLineCode=89 | CountLineCodeDecl=44 | CountLineCodeExe=77 | CountLineComment=8 | CountStmt=66 | CountStmtDecl=36 | CountStmtExe=62 | MaxCyclomatic=10 | MaxInheritanceTree=1 | MaxNesting=2 | SumCyclomatic=16
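
For the 'relative_key' path above, attention gets a learned bias indexed by query-key distance. A small self-contained sketch of how the distance matrix indexes the embedding table (all sizes made up):

```python
import torch
from torch import nn

max_position_embeddings, head_size = 8, 4
# Distances span [-(max_pos-1), max_pos-1], hence 2*max_pos-1 table entries.
distance_embedding = nn.Embedding(2 * max_position_embeddings - 1, head_size)

query_length = key_length = 5
position_ids_l = torch.arange(query_length).view(-1, 1)
position_ids_r = torch.arange(key_length).view(1, -1)
distance = position_ids_l - position_ids_r          # values in [-4, 4]
positional_embedding = distance_embedding(distance + max_position_embeddings - 1)
print(positional_embedding.shape)                   # torch.Size([5, 5, 4])
```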

---

id: 2,706
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitSelfOutput

human_written_code:

```python
from torch import nn
import torch


class GitSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
```

class_skeleton:

```python
class GitSelfOutput(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        pass
```

Metrics: total_program_units=3 | total_doc_str=0 | AvgCountLine=5 | AvgCountLineBlank=0 | AvgCountLineCode=5 | AvgCountLineComment=0 | AvgCyclomatic=1 | CommentToCodeRatio=0 | CountClassBase=1 | CountClassCoupled=2 | CountClassCoupledModified=0 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=3 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=12 | CountLineBlank=1 | CountLineCode=11 | CountLineCodeDecl=6 | CountLineCodeExe=8 | CountLineComment=0 | CountStmt=11 | CountStmtDecl=6 | CountStmtExe=8 | MaxCyclomatic=1 | MaxInheritanceTree=1 | MaxNesting=0 | SumCyclomatic=2

---

id: 2,707
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitVisionAttention

human_written_code:

```python
from typing import Callable, Optional, Union
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn


class GitVisionAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
        self.scale = self.head_dim ** (-0.5)
        self.dropout = config.attention_dropout
        self.is_causal = False
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""
        batch_size, seq_length, embed_dim = hidden_states.shape
        queries = self.q_proj(hidden_states)
        keys = self.k_proj(hidden_states)
        values = self.v_proj(hidden_states)
        queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        if self.config._attn_implementation != 'flash_attention_2':
            if attention_mask is not None and causal_attention_mask is not None:
                attention_mask = attention_mask + causal_attention_mask
            elif causal_attention_mask is not None:
                attention_mask = causal_attention_mask
        else:
            self.is_causal = causal_attention_mask is not None
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != 'eager':
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
        attn_output, attn_weights = attention_interface(self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scale, dropout=0.0 if not self.training else self.dropout)
        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)
        if not output_attentions:
            attn_weights = None
        return (attn_output, attn_weights)
```

class_skeleton:

```python
class GitVisionAttention(nn.Module):
    '''Multi-headed attention from 'Attention Is All You Need' paper'''

    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        '''Input shape: Batch x Time x Channel'''
        pass
```

Metrics: total_program_units=3 | total_doc_str=2 | AvgCountLine=32 | AvgCountLineBlank=5 | AvgCountLineCode=25 | AvgCountLineComment=2 | AvgCyclomatic=4 | CommentToCodeRatio=0.11 | CountClassBase=1 | CountClassCoupled=5 | CountClassCoupledModified=0 | CountClassDerived=0 | CountDeclInstanceMethod=3 | CountDeclInstanceVariable=10 | CountDeclMethod=3 | CountDeclMethodAll=13 | CountLine=102 | CountLineBlank=19 | CountLineCode=75 | CountLineCodeDecl=30 | CountLineCodeExe=65 | CountLineComment=8 | CountStmt=54 | CountStmtDecl=24 | CountStmtExe=50 | MaxCyclomatic=8 | MaxInheritanceTree=1 | MaxNesting=2 | SumCyclomatic=11
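
eager_attention_forward is defined elsewhere in modeling_git.py and is not part of this row, so the sketch below is an assumption about what the eager path computes: plain scaled dot-product attention that returns the output with heads moved next to the sequence dimension, which is what the reshape in forward above expects.

```python
import torch

def eager_attention(queries, keys, values, attention_mask, scaling, dropout=0.0):
    # Inputs are (batch, heads, seq, head_dim); a sketch, not the library's function.
    attn_weights = torch.matmul(queries, keys.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask
    attn_weights = torch.softmax(attn_weights, dim=-1)
    attn_weights = torch.nn.functional.dropout(attn_weights, p=dropout)
    attn_output = torch.matmul(attn_weights, values)
    # (batch, seq, heads, head_dim) so a later reshape can merge the last two dims.
    return attn_output.transpose(1, 2), attn_weights

q = k = v = torch.randn(1, 2, 5, 4)
out, w = eager_attention(q, k, v, None, scaling=4 ** -0.5)
print(out.shape, w.shape)  # torch.Size([1, 5, 2, 4]) torch.Size([1, 2, 5, 5])
```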

---

id: 2,708
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitVisionEmbeddings

human_written_code:

```python
from .configuration_git import GitConfig, GitVisionConfig
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging, torch_int
import torch
from torch import nn


class GitVisionEmbeddings(nn.Module):
    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size
        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
        self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False)
        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        position_embedding = self.position_embedding.weight.unsqueeze(0)
        num_positions = position_embedding.shape[1] - 1
        if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
            return self.position_embedding(self.position_ids)
        class_pos_embed = position_embedding[:, :1]
        patch_pos_embed = position_embedding[:, 1:]
        dim = embeddings.shape[-1]
        new_height = height // self.patch_size
        new_width = width // self.patch_size
        sqrt_num_positions = torch_int(num_positions ** 0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
        patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
            raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size}).")
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings
```

class_skeleton:

```python
class GitVisionEmbeddings(nn.Module):
    def __init__(self, config: GitVisionConfig):
        pass

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        '''
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        '''
        pass

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
        pass
```

Metrics: total_program_units=4 | total_doc_str=1 | AvgCountLine=26 | AvgCountLineBlank=5 | AvgCountLineCode=19 | AvgCountLineComment=3 | AvgCyclomatic=2 | CommentToCodeRatio=0.16 | CountClassBase=1 | CountClassCoupled=5 | CountClassCoupledModified=1 | CountClassDerived=0 | CountDeclInstanceMethod=3 | CountDeclInstanceVariable=9 | CountDeclMethod=3 | CountDeclMethodAll=13 | CountLine=81 | CountLineBlank=16 | CountLineCode=57 | CountLineCodeDecl=27 | CountLineCodeExe=53 | CountLineComment=9 | CountStmt=43 | CountStmtDecl=27 | CountStmtExe=39 | MaxCyclomatic=3 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=6
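
A shape walk-through of the interpolate_pos_encoding logic above, with made-up sizes: a position table trained for a 6x6 patch grid (36 patches + 1 CLS) is resized to a 9x12 grid at inference time.

```python
import torch
from torch import nn

dim, old_grid, new_h, new_w = 8, 6, 9, 12
position_embedding = torch.randn(1, old_grid * old_grid + 1, dim)  # hypothetical table

class_pos_embed = position_embedding[:, :1]   # CLS position is kept as-is
patch_pos_embed = position_embedding[:, 1:]   # patch positions get resampled
patch_pos_embed = patch_pos_embed.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_h, new_w), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
out = torch.cat((class_pos_embed, patch_pos_embed), dim=1)
print(out.shape)  # torch.Size([1, 109, 8]) -> 9*12 patches + 1 CLS
```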

---

id: 2,709
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitVisionEncoder

human_written_code:

```python
import torch
from .configuration_git import GitConfig, GitVisionConfig
from typing import Callable, Optional, Union
from torch import nn
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging, torch_int
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPooling, CausalLMOutputWithPast


class GitVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`GitVisionEncoderLayer`].

    Args:
        config: GitVisionConfig
    """

    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([GitVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    @can_return_tuple
    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
        """
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
```

class_skeleton:

```python
class GitVisionEncoder(nn.Module):
    '''
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`GitVisionEncoderLayer`].

    Args:
        config: GitVisionConfig
    '''

    def __init__(self, config: GitVisionConfig):
        pass

    @can_return_tuple
    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
        '''
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        '''
        pass
```

Metrics: total_program_units=4 | total_doc_str=2 | AvgCountLine=43 | AvgCountLineBlank=5 | AvgCountLineCode=25 | AvgCountLineComment=13 | AvgCyclomatic=7 | CommentToCodeRatio=0.61 | CountClassBase=1 | CountClassCoupled=9 | CountClassCoupledModified=3 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=3 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=95 | CountLineBlank=13 | CountLineCode=51 | CountLineCodeDecl=19 | CountLineCodeExe=40 | CountLineComment=31 | CountStmt=27 | CountStmtDecl=11 | CountStmtExe=24 | MaxCyclomatic=12 | MaxInheritanceTree=1 | MaxNesting=2 | SumCyclomatic=13

---

id: 2,710
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitVisionEncoderLayer

human_written_code:

```python
from ...modeling_layers import GradientCheckpointingLayer
from .configuration_git import GitConfig, GitVisionConfig
import torch
from typing import Callable, Optional, Union
from torch import nn


class GitVisionEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = GitVisionAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = GitVisionMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)
        hidden_states = residual + hidden_states
        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
```

class_skeleton:

```python
class GitVisionEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GitVisionConfig):
        pass

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
        '''
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        '''
        pass
```

Metrics: total_program_units=3 | total_doc_str=1 | AvgCountLine=23 | AvgCountLineBlank=3 | AvgCountLineCode=16 | AvgCountLineComment=5 | AvgCyclomatic=2 | CommentToCodeRatio=0.31 | CountClassBase=1 | CountClassCoupled=6 | CountClassCoupledModified=3 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=5 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=48 | CountLineBlank=6 | CountLineCode=32 | CountLineCodeDecl=17 | CountLineCodeExe=23 | CountLineComment=10 | CountStmt=21 | CountStmtDecl=11 | CountStmtExe=18 | MaxCyclomatic=2 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=3
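
GitVisionEncoderLayer applies the pre-LayerNorm residual pattern twice: once around attention and once around the MLP (normalize, transform, add the residual back). A minimal generic sketch of that pattern, with a Linear standing in for either sublayer:

```python
import torch
from torch import nn

embed_dim = 8
norm = nn.LayerNorm(embed_dim)
sublayer = nn.Linear(embed_dim, embed_dim)  # stand-in for attention or the MLP

x = torch.randn(2, 5, embed_dim)
x = x + sublayer(norm(x))  # residual stream keeps the un-normalized signal
print(x.shape)             # torch.Size([2, 5, 8])
```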

---

id: 2,711
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitVisionMLP

human_written_code:

```python
from ...activations import ACT2FN
import torch
from torch import nn


class GitVisionMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
```

class_skeleton:

```python
class GitVisionMLP(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
```

Metrics: total_program_units=3 | total_doc_str=0 | AvgCountLine=6 | AvgCountLineBlank=0 | AvgCountLineCode=6 | AvgCountLineComment=0 | AvgCyclomatic=1 | CommentToCodeRatio=0 | CountClassBase=1 | CountClassCoupled=2 | CountClassCoupledModified=0 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=4 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=13 | CountLineBlank=1 | CountLineCode=12 | CountLineCodeDecl=7 | CountLineCodeExe=9 | CountLineComment=0 | CountStmt=12 | CountStmtDecl=7 | CountStmtExe=9 | MaxCyclomatic=1 | MaxInheritanceTree=1 | MaxNesting=0 | SumCyclomatic=2

---

id: 2,712
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitVisionModel

human_written_code:

````python
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPooling, CausalLMOutputWithPast
from .configuration_git import GitConfig, GitVisionConfig
from typing import Callable, Optional, Union
from torch import nn
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging, torch_int
import torch


@auto_docstring(custom_intro='\n The vision model from CLIP, used in GIT, without any head or projection on top.\n ')
class GitVisionModel(GitPreTrainedModel):
    config: GitVisionConfig
    main_input_name = 'pixel_values'

    def __init__(self, config: GitVisionConfig):
        super().__init__(config)
        self.vision_model = GitVisionTransformer(config)
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @auto_docstring
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
        """
        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, GitVisionModel
        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
        >>> model = GitVisionModel.from_pretrained("microsoft/git-base")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
````

class_skeleton:

````python
@auto_docstring(custom_intro='\n The vision model from CLIP, used in GIT, without any head or projection on top.\n ')
class GitVisionModel(GitPreTrainedModel):
    def __init__(self, config: GitVisionConfig):
        pass

    def get_input_embeddings(self) -> nn.Module:
        pass

    @auto_docstring
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
        '''
        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, GitVisionModel
        >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
        >>> model = GitVisionModel.from_pretrained("microsoft/git-base")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        ```'''
        pass
````

Metrics: total_program_units=6 | total_doc_str=1 | AvgCountLine=15 | AvgCountLineBlank=2 | AvgCountLineCode=7 | AvgCountLineComment=5 | AvgCyclomatic=1 | CommentToCodeRatio=0.63 | CountClassBase=1 | CountClassCoupled=5 | CountClassCoupledModified=3 | CountClassDerived=0 | CountDeclInstanceMethod=3 | CountDeclInstanceVariable=1 | CountDeclMethod=3 | CountDeclMethodAll=133 | CountLine=54 | CountLineBlank=10 | CountLineCode=27 | CountLineCodeDecl=15 | CountLineCodeExe=14 | CountLineComment=17 | CountStmt=12 | CountStmtDecl=7 | CountStmtExe=8 | MaxCyclomatic=2 | MaxInheritanceTree=3 | MaxNesting=0 | SumCyclomatic=4

---

id: 2,713
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/modeling_git.py
class_name: transformers.models.git.modeling_git.GitVisionTransformer

human_written_code:

```python
from torch import nn
from .configuration_git import GitConfig, GitVisionConfig
import torch
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging, torch_int
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPooling, CausalLMOutputWithPast
from typing import Callable, Optional, Union


class GitVisionTransformer(nn.Module):
    def __init__(self, config: GitVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = GitVisionEmbeddings(config)
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = GitVisionEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layrnorm(hidden_states)
        encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)
        if not return_dict:
            return (last_hidden_state,) + encoder_outputs[1:]
        return BaseModelOutput(last_hidden_state=last_hidden_state, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
```

class_skeleton:

```python
class GitVisionTransformer(nn.Module):
    def __init__(self, config: GitVisionConfig):
        pass

    @auto_docstring
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
        pass
```

Metrics: total_program_units=4 | total_doc_str=0 | AvgCountLine=26 | AvgCountLineBlank=5 | AvgCountLineCode=20 | AvgCountLineComment=2 | AvgCyclomatic=4 | CommentToCodeRatio=0.09 | CountClassBase=1 | CountClassCoupled=7 | CountClassCoupledModified=4 | CountClassDerived=0 | CountDeclInstanceMethod=2 | CountDeclInstanceVariable=5 | CountDeclMethod=2 | CountDeclMethodAll=12 | CountLine=57 | CountLineBlank=10 | CountLineCode=43 | CountLineCodeDecl=20 | CountLineCodeExe=31 | CountLineComment=4 | CountStmt=23 | CountStmtDecl=12 | CountStmtExe=20 | MaxCyclomatic=6 | MaxInheritanceTree=1 | MaxNesting=1 | SumCyclomatic=7

---

id: 2,714
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/git/processing_git.py
class_name: transformers.models.git.processing_git.GitProcessor

human_written_code:

```python
from ...processing_utils import ProcessorMixin


class GitProcessor(ProcessorMixin):
    """
    Constructs a GIT processor which wraps a CLIP image processor and a BERT tokenizer into a single processor.

    [`GitProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`BertTokenizerFast`]. See the
    [`~GitProcessor.__call__`] and [`~GitProcessor.decode`] for more information.

    Args:
        image_processor ([`AutoImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`AutoTokenizer`]):
            The tokenizer is a required input.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
```

class_skeleton:

```python
class GitProcessor(ProcessorMixin):
    '''
    Constructs a GIT processor which wraps a CLIP image processor and a BERT tokenizer into a single processor.

    [`GitProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`BertTokenizerFast`]. See the
    [`~GitProcessor.__call__`] and [`~GitProcessor.decode`] for more information.

    Args:
        image_processor ([`AutoImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`AutoTokenizer`]):
            The tokenizer is a required input.
    '''

    def __init__(self, image_processor, tokenizer):
        pass
```

Metrics: total_program_units=2 | total_doc_str=1 | AvgCountLine=18 | AvgCountLineBlank=2 | AvgCountLineCode=9 | AvgCountLineComment=7 | AvgCyclomatic=2 | CommentToCodeRatio=0.96 | CountClassBase=1 | CountClassCoupled=4 | CountClassCoupledModified=2 | CountClassDerived=0 | CountDeclInstanceMethod=5 | CountDeclInstanceVariable=1 | CountDeclMethod=5 | CountDeclMethodAll=22 | CountLine=114 | CountLineBlank=18 | CountLineCode=49 | CountLineCodeDecl=23 | CountLineCodeExe=35 | CountLineComment=47 | CountStmt=32 | CountStmtDecl=15 | CountStmtExe=26 | MaxCyclomatic=6 | MaxInheritanceTree=2 | MaxNesting=2 | SumCyclomatic=10
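
A hedged usage sketch for GitProcessor: the checkpoint name is taken from the GitModel docstring above, and the text-only call assumes the processor forwards text straight to the wrapped BERT tokenizer.

```python
from transformers import AutoProcessor

# "microsoft/git-base" is the checkpoint used in the docstrings above.
processor = AutoProcessor.from_pretrained("microsoft/git-base")
inputs = processor(text="this is an image of two cats", return_tensors="pt")
print(list(inputs.keys()))  # expected: tokenizer outputs such as input_ids, attention_mask
```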
2,715
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/configuration_glm.py
|
transformers.models.glm.configuration_glm.GlmConfig
|
from ...configuration_utils import PretrainedConfig
class GlmConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`GlmModel`]. It is used to instantiate an Glm
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Glm-4-9b-chat.
e.g. [THUDM/glm-4-9b-chat](https://huggingface.co/THUDM/glm-4-9b-chat)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 151552):
Vocabulary size of the Glm model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GlmModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 13696):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 40):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 2):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
partial_rotary_factor (`float`, *optional*, defaults to 0.5): The factor of the partial rotary position.
head_dim (`int`, *optional*, defaults to 128):
The attention head dimension.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The legacy activation function. It is overwritten by the `hidden_activation`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 131072):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1.5625e-07):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
pad_token_id (`int`, *optional*, defaults to 151329):
Padding token id.
eos_token_id (`int` | `list`, *optional*, defaults to `[151329, 151336, 151338]`):
End of stream token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `True`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
```python
>>> from transformers import GlmModel, GlmConfig
>>> # Initializing a Glm glm-4-9b-chat style configuration
>>> configuration = GlmConfig()
>>> # Initializing a model from the glm-4-9b-chat style configuration
>>> model = GlmModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'glm'
keys_to_ignore_at_inference = ['past_key_values']
base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.gate_up_proj': 'colwise_rep', 'layers.*.mlp.down_proj': 'rowwise_rep'}
base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}
def __init__(self, vocab_size=151552, hidden_size=4096, intermediate_size=13696, num_hidden_layers=40, num_attention_heads=32, num_key_value_heads=2, partial_rotary_factor=0.5, head_dim=128, hidden_act='silu', attention_dropout=0.0, max_position_embeddings=131072, initializer_range=0.02, rms_norm_eps=1.5625e-07, use_cache=True, tie_word_embeddings=False, rope_theta=10000.0, pad_token_id=151329, eos_token_id=[151329, 151336, 151338], bos_token_id=None, attention_bias=True, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.partial_rotary_factor = partial_rotary_factor
self.head_dim = head_dim
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
|
class GlmConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`GlmModel`]. It is used to instantiate an Glm
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Glm-4-9b-chat.
e.g. [THUDM/glm-4-9b-chat](https://huggingface.co/THUDM/glm-4-9b-chat)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 151552):
Vocabulary size of the Glm model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GlmModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 13696):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 40):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 2):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
partial_rotary_factor (`float`, *optional*, defaults to 0.5): The fraction of each attention head's dimension that rotary position embeddings are applied to; the remaining dimensions pass through unrotated.
head_dim (`int`, *optional*, defaults to 128):
The attention head dimension.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) used in the decoder MLP.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 131072):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1.5625e-07):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie the input and output word embeddings.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
pad_token_id (`int`, *optional*, defaults to 151329):
Padding token id.
eos_token_id (`int` | `list`, *optional*, defaults to `[151329, 151336, 151338]`):
End of stream token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
attention_bias (`bool`, *optional*, defaults to `True`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
```python
>>> from transformers import GlmModel, GlmConfig
>>> # Initializing a Glm glm-4-9b-chat style configuration
>>> configuration = GlmConfig()
>>> # Initializing a model from the glm-4-9b-chat style configuration
>>> model = GlmModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=151552, hidden_size=4096, intermediate_size=13696, num_hidden_layers=40, num_attention_heads=32, num_key_value_heads=2, partial_rotary_factor=0.5, head_dim=128, hidden_act='silu', attention_dropout=0.0, max_position_embeddings=131072, initializer_range=0.02, rms_norm_eps=1.5625e-07, use_cache=True, tie_word_embeddings=False, rope_theta=10000.0, pad_token_id=151329, eos_token_id=[151329, 151336, 151338], bos_token_id=None, attention_bias=True, **kwargs):
pass
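# Hedged sketch (not part of GlmConfig): the mean-pooling conversion from a
# multi-head checkpoint to a GQA checkpoint that the num_key_value_heads
# docstring above describes. The shapes below are illustrative assumptions
# taken from the config defaults, not values read from a real checkpoint.
import torch

num_heads, num_kv_heads, head_dim, hidden = 32, 2, 128, 4096  # config defaults
group = num_heads // num_kv_heads  # 16 query heads share each KV head
w_k_mha = torch.randn(num_heads * head_dim, hidden)  # stand-in MHA k_proj weight
w_k_gqa = (
    w_k_mha.view(num_kv_heads, group, head_dim, hidden)
    .mean(dim=1)  # mean-pool all original heads within each group
    .reshape(num_kv_heads * head_dim, hidden)
)
assert w_k_gqa.shape == (num_kv_heads * head_dim, hidden)  # (256, 4096)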
| 2
| 1
| 48
| 1
| 47
| 0
| 1
| 1.14
| 1
| 1
| 0
| 0
| 1
| 16
| 1
| 1
| 125
| 3
| 58
| 44
| 33
| 66
| 22
| 21
| 20
| 1
| 1
| 0
| 1
|
2,716
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modeling_glm.py
|
transformers.models.glm.modeling_glm.GlmAttention
|
from ...processing_utils import Unpack
from ...utils.deprecation import deprecate_kwarg
import torch.nn as nn
from .configuration_glm import GlmConfig
from typing import Callable, Optional, Union
from ...cache_utils import Cache, DynamicCache
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
class GlmAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: GlmConfig, layer_idx: Optional[int]=None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
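# Grouped Query Attention bookkeeping: with the config defaults this is
# 32 query heads // 2 key/value heads = 16 query heads sharing each KV head.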
self.scaling = self.head_dim ** (-0.5)
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
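# Glm uses partial rotary embeddings: with partial_rotary_factor=0.5 only the
# leading half of each head dimension is rotated; the rest passes through.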
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
|
class GlmAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: GlmConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 4
| 1
| 34
| 4
| 30
| 1
| 3
| 0.03
| 1
| 6
| 3
| 1
| 2
| 11
| 2
| 12
| 72
| 9
| 61
| 31
| 50
| 2
| 34
| 23
| 31
| 5
| 1
| 2
| 6
|
2,717
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modeling_glm.py
|
transformers.models.glm.modeling_glm.GlmDecoderLayer
|
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
from .configuration_glm import GlmConfig
import torch.nn as nn
from ...modeling_layers import GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
from ...utils.deprecation import deprecate_kwarg
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...cache_utils import Cache, DynamicCache
import torch
class GlmDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: GlmConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = GlmAttention(config=config, layer_idx=layer_idx)
self.mlp = GlmMLP(config)
self.input_layernorm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
residual = hidden_states
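# Pre-norm residual sub-block: normalize, run self-attention, add the residual
# back; the same pattern repeats below for the MLP.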
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
|
class GlmDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: GlmConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
| 4
| 0
| 25
| 4
| 21
| 2
| 2
| 0.07
| 1
| 10
| 6
| 0
| 2
| 5
| 2
| 12
| 52
| 8
| 42
| 22
| 28
| 3
| 21
| 11
| 18
| 2
| 1
| 1
| 3
|
2,718
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modeling_glm.py
|
transformers.models.glm.modeling_glm.GlmForCausalLM
|
from typing import Callable, Optional, Union
from ...cache_utils import Cache, DynamicCache
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
import torch
from ...generation import GenerationMixin
import torch.nn as nn
@auto_docstring
class GlmForCausalLM(GlmPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
_tp_plan = {'lm_head': 'colwise_rep'}
_pp_plan = {'lm_head': (['hidden_states'], ['logits'])}
def __init__(self, config):
super().__init__(config)
self.model = GlmModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
"""
Example:
```python
>>> from transformers import AutoTokenizer, GlmForCausalLM
>>> model = GlmForCausalLM.from_pretrained("meta-glm/Glm-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-glm/Glm-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
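# An integer logits_to_keep=n materializes logits only for the last n positions
# (slice(-n, None)); n=0 gives slice(0, None), i.e. logits for every position.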
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class GlmForCausalLM(GlmPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
'''
Example:
```python
>>> from transformers import AutoTokenizer, GlmForCausalLM
>>> model = GlmForCausalLM.from_pretrained("meta-glm/Glm-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-glm/Glm-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```'''
pass
| 6
| 1
| 14
| 2
| 9
| 4
| 2
| 0.38
| 2
| 9
| 4
| 0
| 8
| 3
| 8
| 9
| 123
| 21
| 74
| 36
| 47
| 28
| 36
| 20
| 27
| 8
| 2
| 1
| 15
|
2,719
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modeling_glm.py
|
transformers.models.glm.modeling_glm.GlmForSequenceClassification
|
from ...modeling_layers import GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class GlmForSequenceClassification(GenericForSequenceClassification, GlmPreTrainedModel):
pass
|
class GlmForSequenceClassification(GenericForSequenceClassification, GlmPreTrainedModel):
pass
| 1
| 0
| 21
| 2
| 17
| 2
| 3
| 0.11
| 1
| 7
| 3
| 0
| 4
| 3
| 4
| 5
| 90
| 11
| 71
| 31
| 53
| 8
| 36
| 18
| 31
| 9
| 2
| 1
| 12
|
2,720
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modeling_glm.py
|
transformers.models.glm.modeling_glm.GlmForTokenClassification
|
from ...modeling_layers import GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class GlmForTokenClassification(GenericForTokenClassification, GlmPreTrainedModel):
pass
|
class GlmForTokenClassification(GenericForTokenClassification, GlmPreTrainedModel):
pass
| 1
| 0
| 17
| 1
| 14
| 2
| 3
| 0.11
| 1
| 5
| 2
| 0
| 4
| 4
| 4
| 5
| 79
| 8
| 64
| 28
| 41
| 7
| 29
| 15
| 24
| 5
| 2
| 1
| 10
|
2,721
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modeling_glm.py
|
transformers.models.glm.modeling_glm.GlmMLP
|
import torch
import torch.nn as nn
from ...activations import ACT2FN
class GlmMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
self.activation_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
up_states = self.gate_up_proj(hidden_states)
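# gate_up_proj packs the gate and up projections into a single matmul; chunk
# splits the 2 * intermediate_size output back into the two halves.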
gate, up_states = up_states.chunk(2, dim=-1)
up_states = up_states * self.activation_fn(gate)
return self.down_proj(up_states)
|
class GlmMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
pass
| 3
| 0
| 7
| 2
| 6
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 4
| 2
| 12
| 16
| 4
| 12
| 9
| 9
| 0
| 12
| 9
| 9
| 1
| 1
| 0
| 2
|
2,722
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modeling_glm.py
|
transformers.models.glm.modeling_glm.GlmModel
|
from ...cache_utils import Cache, DynamicCache
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from .configuration_glm import GlmConfig
from ...masking_utils import create_causal_mask
from ...utils.generic import check_model_inputs
import torch.nn as nn
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
import torch
@auto_docstring
class GlmModel(GlmPreTrainedModel):
def __init__(self, config: GlmConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([GlmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = GlmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = GlmRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
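# cache_position holds the absolute sequence indices of the current tokens,
# offset by however many tokens the KV cache has already seen.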
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
|
@auto_docstring
class GlmModel(GlmPreTrainedModel):
def __init__(self, config: GlmConfig):
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
pass
| 6
| 0
| 40
| 5
| 30
| 6
| 6
| 0.22
| 1
| 16
| 10
| 0
| 5
| 7
| 6
| 7
| 257
| 34
| 184
| 65
| 146
| 40
| 89
| 34
| 82
| 21
| 2
| 2
| 37
|
2,723
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modeling_glm.py
|
transformers.models.glm.modeling_glm.GlmPreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from .configuration_glm import GlmConfig
@auto_docstring
class GlmPreTrainedModel(PreTrainedModel):
config: GlmConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['GlmDecoderLayer']
_skip_keys_device_placement = ['past_key_values']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': GlmDecoderLayer, 'attentions': GlmAttention}
|
@auto_docstring
class GlmPreTrainedModel(PreTrainedModel):
pass
| 2
| 0
| 10
| 0
| 10
| 0
| 5
| 0
| 1
| 0
| 0
| 4
| 1
| 0
| 1
| 1
| 24
| 1
| 23
| 15
| 21
| 0
| 22
| 15
| 20
| 5
| 1
| 2
| 5
|
2,724
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modeling_glm.py
|
transformers.models.glm.modeling_glm.GlmRMSNorm
|
import torch.nn as nn
from ...integrations import use_kernel_forward_from_hub
import torch
@use_kernel_forward_from_hub('RMSNorm')
class GlmRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
GlmRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
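# RMSNorm: x * rsqrt(mean(x^2) + eps) * weight, computed in float32 for
# numerical stability and cast back to the input dtype at the end.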
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
|
@use_kernel_forward_from_hub('RMSNorm')
class GlmRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
GlmRMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 5
| 1
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 2
| 0
| 0
| 3
| 2
| 3
| 13
| 18
| 2
| 13
| 8
| 9
| 3
| 13
| 8
| 9
| 1
| 1
| 0
| 3
|
2,725
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modeling_glm.py
|
transformers.models.glm.modeling_glm.GlmRotaryEmbedding
|
from .configuration_glm import GlmConfig
import torch.nn as nn
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
import torch
class GlmRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: GlmConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
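# The outer product inv_freq x position_ids yields per-position angles; it is
# computed in float32 with autocast disabled, and (freqs, freqs) duplicates the
# angles to cover the full head dimension under the rotate-half convention.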
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
|
class GlmRotaryEmbedding(nn.Module):
def __init__(self, config: GlmConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5
| 0
| 18
| 2
| 13
| 5
| 3
| 0.35
| 1
| 4
| 1
| 1
| 3
| 7
| 3
| 13
| 59
| 8
| 40
| 21
| 35
| 14
| 38
| 20
| 34
| 3
| 1
| 1
| 8
|
2,726
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modular_glm.py
|
transformers.models.glm.modular_glm.GlmAttention
|
import torch.nn as nn
from .configuration_glm import GlmConfig
from typing import Optional
from ..llama.modeling_llama import LlamaAttention, LlamaForCausalLM, LlamaForSequenceClassification, LlamaForTokenClassification
class GlmAttention(LlamaAttention):
def __init__(self, config: GlmConfig, layer_idx: Optional[int]=None):
super().__init__(config, layer_idx)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
|
class GlmAttention(LlamaAttention):
def __init__(self, config: GlmConfig, layer_idx: Optional[int]=None):
pass
| 2
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 1
| 2
| 1
| 13
| 4
| 0
| 4
| 3
| 2
| 0
| 4
| 3
| 2
| 1
| 2
| 0
| 1
|
2,727
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modular_glm.py
|
transformers.models.glm.modular_glm.GlmForCausalLM
|
from ..llama.modeling_llama import LlamaAttention, LlamaForCausalLM, LlamaForSequenceClassification, LlamaForTokenClassification
class GlmForCausalLM(LlamaForCausalLM):
pass
|
class GlmForCausalLM(LlamaForCausalLM):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
2,728
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modular_glm.py
|
transformers.models.glm.modular_glm.GlmForSequenceClassification
|
from ..llama.modeling_llama import LlamaAttention, LlamaForCausalLM, LlamaForSequenceClassification, LlamaForTokenClassification
class GlmForSequenceClassification(LlamaForSequenceClassification):
pass
|
class GlmForSequenceClassification(LlamaForSequenceClassification):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
2,729
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modular_glm.py
|
transformers.models.glm.modular_glm.GlmForTokenClassification
|
from ..llama.modeling_llama import LlamaAttention, LlamaForCausalLM, LlamaForSequenceClassification, LlamaForTokenClassification
class GlmForTokenClassification(LlamaForTokenClassification):
pass
|
class GlmForTokenClassification(LlamaForTokenClassification):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
2,730
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glm/modular_glm.py
|
transformers.models.glm.modular_glm.GlmMLP
|
from ..phi3.modeling_phi3 import Phi3MLP
class GlmMLP(Phi3MLP):
pass
|
class GlmMLP(Phi3MLP):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
2,731
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/configuration_glpn.py
|
transformers.models.glpn.configuration_glpn.GLPNConfig
|
from ...configuration_utils import PretrainedConfig
class GLPNConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`GLPNModel`]. It is used to instantiate a GLPN
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the GLPN
[vinvino02/glpn-kitti](https://huggingface.co/vinvino02/glpn-kitti) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_encoder_blocks (`int`, *optional*, defaults to 4):
The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
depths (`list[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
The number of layers in each encoder block.
sr_ratios (`list[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
Sequence reduction ratios in each encoder block.
hidden_sizes (`list[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
Dimension of each of the encoder blocks.
patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
Patch size before each encoder block.
strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
Stride before each encoder block.
num_attention_heads (`list[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
Number of attention heads for each attention layer in each block of the Transformer encoder.
mlp_ratios (`list[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
encoder blocks.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
drop_path_rate (`float`, *optional*, defaults to 0.1):
The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
decoder_hidden_size (`int`, *optional*, defaults to 64):
The dimension of the decoder.
max_depth (`int`, *optional*, defaults to 10):
The maximum depth of the decoder.
head_in_index (`int`, *optional*, defaults to -1):
The index of the features to use in the head.
Example:
```python
>>> from transformers import GLPNModel, GLPNConfig
>>> # Initializing a GLPN vinvino02/glpn-kitti style configuration
>>> configuration = GLPNConfig()
>>> # Initializing a model from the vinvino02/glpn-kitti style configuration
>>> model = GLPNModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'glpn'
def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-06, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs):
super().__init__(**kwargs)
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.depths = depths
self.sr_ratios = sr_ratios
self.hidden_sizes = hidden_sizes
self.patch_sizes = patch_sizes
self.strides = strides
self.mlp_ratios = mlp_ratios
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.drop_path_rate = drop_path_rate
self.layer_norm_eps = layer_norm_eps
self.decoder_hidden_size = decoder_hidden_size
self.max_depth = max_depth
self.head_in_index = head_in_index
|
class GLPNConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`GLPNModel`]. It is used to instantiate a GLPN
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the GLPN
[vinvino02/glpn-kitti](https://huggingface.co/vinvino02/glpn-kitti) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_encoder_blocks (`int`, *optional*, defaults to 4):
The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
depths (`list[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
The number of layers in each encoder block.
sr_ratios (`list[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
Sequence reduction ratios in each encoder block.
hidden_sizes (`list[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
Dimension of each of the encoder blocks.
patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
Patch size before each encoder block.
strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
Stride before each encoder block.
num_attention_heads (`list[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
Number of attention heads for each attention layer in each block of the Transformer encoder.
mlp_ratios (`list[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
encoder blocks.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
drop_path_rate (`float`, *optional*, defaults to 0.1):
The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
decoder_hidden_size (`int`, *optional*, defaults to 64):
The dimension of the decoder.
max_depth (`int`, *optional*, defaults to 10):
The maximum depth of the decoder.
head_in_index (`int`, *optional*, defaults to -1):
The index of the features to use in the head.
Example:
```python
>>> from transformers import GLPNModel, GLPNConfig
>>> # Initializing a GLPN vinvino02/glpn-kitti style configuration
>>> configuration = GLPNConfig()
>>> # Initializing a model from the vinvino02/glpn-kitti style configuration
>>> model = GLPNModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-06, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs):
pass
| 2
| 1
| 42
| 1
| 41
| 0
| 1
| 1.3
| 1
| 1
| 0
| 0
| 1
| 18
| 1
| 1
| 109
| 10
| 43
| 42
| 20
| 56
| 22
| 21
| 20
| 1
| 1
| 0
| 1
|
2,732
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/feature_extraction_glpn.py
|
transformers.models.glpn.feature_extraction_glpn.GLPNFeatureExtractor
|
from ...utils.import_utils import requires
import warnings
from .image_processing_glpn import GLPNImageProcessor
@requires(backends=('vision',))
class GLPNFeatureExtractor(GLPNImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use GLPNImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
|
@requires(backends=('vision',))
class GLPNFeatureExtractor(GLPNImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 25
| 8
| 0
| 8
| 2
| 6
| 0
| 4
| 2
| 2
| 1
| 4
| 0
| 1
|
2,733
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/image_processing_glpn.py
|
transformers.models.glpn.image_processing_glpn.GLPNImageProcessor
|
from ...utils import TensorType, filter_out_non_signature_kwargs, logging, requires_backends
from ...utils.import_utils import requires
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import ChannelDimension, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, is_torch_available, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from typing import TYPE_CHECKING, Optional, Union
import numpy as np
@requires(backends=('vision',))
class GLPNImageProcessor(BaseImageProcessor):
"""
Constructs a GLPN image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions, rounding them down to the closest multiple of
`size_divisor`. Can be overridden by `do_resize` in `preprocess`.
size_divisor (`int`, *optional*, defaults to 32):
When `do_resize` is `True`, images are resized so their height and width are rounded down to the closest
multiple of `size_divisor`. Can be overridden by `size_divisor` in `preprocess`.
resample (`PIL.Image` resampling filter, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Can be
overridden by `do_rescale` in `preprocess`.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size_divisor: int=32, resample=PILImageResampling.BILINEAR, do_rescale: bool=True, **kwargs) -> None:
self.do_resize = do_resize
self.do_rescale = do_rescale
self.size_divisor = size_divisor
self.resample = resample
super().__init__(**kwargs)
def resize(self, image: np.ndarray, size_divisor: int, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize the image, rounding the (height, width) dimensions down to the closest multiple of size_divisor.
If the image is of dimension (3, 260, 170) and size_divisor is 32, the image will be resized to (3, 256, 160).
Args:
image (`np.ndarray`):
The image to resize.
size_divisor (`int`):
The image is resized so its height and width are rounded down to the closest multiple of
`size_divisor`.
resample:
`PIL.Image` resampling filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If `None`, the channel dimension format of the input
image is used. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not set, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
Returns:
`np.ndarray`: The resized image.
"""
height, width = get_image_size(image, channel_dim=input_data_format)
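# Round height and width down to the nearest multiple of size_divisor, e.g.
# 260 -> 256 and 170 -> 160 with the default size_divisor=32.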
new_h = height // size_divisor * size_divisor
new_w = width // size_divisor * size_divisor
image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
return image
@filter_out_non_signature_kwargs()
def preprocess(self, images: Union['PIL.Image.Image', TensorType, list['PIL.Image.Image'], list[TensorType]], do_resize: Optional[bool]=None, size_divisor: Optional[int]=None, resample=None, do_rescale: Optional[bool]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
"""
Preprocess the given images.
Args:
images (`PIL.Image.Image` or `TensorType` or `list[np.ndarray]` or `list[TensorType]`):
Images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_normalize=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the input such that the (height, width) dimensions are a multiple of `size_divisor`.
size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
When `do_resize` is `True`, images are resized so their height and width are rounded down to the
closest multiple of `size_divisor`.
resample (`PIL.Image` resampling filter, *optional*, defaults to `self.resample`):
`PIL.Image` resampling filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.).
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- `None`: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
resample = resample if resample is not None else self.resample
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_resize=do_resize, size=size_divisor, resample=resample)
images = [to_numpy_array(img) for img in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [self.resize(image, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format) for image in images]
if do_rescale:
images = [self.rescale(image, scale=1 / 255, input_data_format=input_data_format) for image in images]
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_depth_estimation(self, outputs: 'DepthEstimatorOutput', target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]]=None) -> list[dict[str, TensorType]]:
"""
Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions.
Only supports PyTorch.
Args:
outputs ([`DepthEstimatorOutput`]):
Raw outputs of the model.
target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
Returns:
`list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
predictions.
"""
requires_backends(self, 'torch')
predicted_depth = outputs.predicted_depth
if target_sizes is not None and len(predicted_depth) != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the predicted depth')
results = []
target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
for depth, target_size in zip(predicted_depth, target_sizes):
if target_size is not None:
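# F.interpolate expects a 4D (N, C, H, W) tensor, so add singleton batch and
# channel dims, resize bicubically, then squeeze them away again.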
depth = depth[None, None, ...]
depth = torch.nn.functional.interpolate(depth, size=target_size, mode='bicubic', align_corners=False)
depth = depth.squeeze()
results.append({'predicted_depth': depth})
return results
|
@requires(backends=('vision',))
class GLPNImageProcessor(BaseImageProcessor):
'''
Constructs a GLPN image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions, rounding them down to the closest multiple of
`size_divisor`. Can be overridden by `do_resize` in `preprocess`.
size_divisor (`int`, *optional*, defaults to 32):
When `do_resize` is `True`, images are resized so their height and width are rounded down to the closest
multiple of `size_divisor`. Can be overridden by `size_divisor` in `preprocess`.
resample (`PIL.Image` resampling filter, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Can be
overridden by `do_rescale` in `preprocess`.
'''
def __init__(self, do_resize: bool=True, size_divisor: int=32, resample=PILImageResampling.BILINEAR, do_rescale: bool=True, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size_divisor: int, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize the image, rounding the (height, width) dimensions down to the closest multiple of size_divisor.
If the image is of dimension (3, 260, 170) and size_divisor is 32, the image will be resized to (3, 256, 160).
Args:
image (`np.ndarray`):
The image to resize.
size_divisor (`int`):
The image is resized so its height and width are rounded down to the closest multiple of
`size_divisor`.
resample:
`PIL.Image` resampling filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If `None`, the channel dimension format of the input
image is used. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not set, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
Returns:
`np.ndarray`: The resized image.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: Union['PIL.Image.Image', TensorType, list['PIL.Image.Image'], list[TensorType]], do_resize: Optional[bool]=None, size_divisor: Optional[int]=None, resample=None, do_rescale: Optional[bool]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
'''
Preprocess the given images.
Args:
images (`PIL.Image.Image` or `TensorType` or `list[np.ndarray]` or `list[TensorType]`):
Images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_normalize=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the input such that the (height, width) dimensions are a multiple of `size_divisor`.
size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
When `do_resize` is `True`, images are resized so their height and width are rounded down to the
closest multiple of `size_divisor`.
resample (`PIL.Image` resampling filter, *optional*, defaults to `self.resample`):
`PIL.Image` resampling filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has
an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.).
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- `None`: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def post_process_depth_estimation(self, outputs: 'DepthEstimatorOutput', target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]]=None) -> list[dict[str, TensorType]]:
'''
Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions.
Only supports PyTorch.
Args:
outputs ([`DepthEstimatorOutput`]):
Raw outputs of the model.
target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
Returns:
`list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
predictions.
'''
pass
| 7
| 4
| 49
| 5
| 25
| 19
| 4
| 0.89
| 1
| 8
| 2
| 1
| 4
| 4
| 4
| 24
| 221
| 27
| 103
| 47
| 68
| 92
| 49
| 17
| 44
| 10
| 3
| 2
| 17
|
2,734
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNAttention
|
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from torch import nn
class GLPNAttention(nn.Module):
def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
super().__init__()
self.self = GLPNEfficientSelfAttention(config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio)
self.output = GLPNSelfOutput(config, hidden_size=hidden_size)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
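# Drop the selected heads from the Q/K/V projections and the matching input
# columns of the output projection, then shrink the bookkeeping sizes.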
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states, height, width, output_attentions=False):
self_outputs = self.self(hidden_states, height, width, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
|
class GLPNAttention(nn.Module):
def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states, height, width, output_attentions=False):
pass
| 4
| 0
| 11
| 1
| 9
| 1
| 1
| 0.1
| 1
| 4
| 2
| 0
| 3
| 3
| 3
| 13
| 36
| 5
| 29
| 11
| 25
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
2,735
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNDWConv
|
from torch import nn
class GLPNDWConv(nn.Module):
def __init__(self, dim=768):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
def forward(self, hidden_states, height, width):
batch_size, seq_len, num_channels = hidden_states.shape
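# (batch, seq_len, channels) -> (batch, channels, height, width) so the 3x3
# depthwise conv can mix spatially; flattened back to a sequence afterwards.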
hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width)
hidden_states = self.dwconv(hidden_states)
hidden_states = hidden_states.flatten(2).transpose(1, 2)
return hidden_states
|
class GLPNDWConv(nn.Module):
def __init__(self, dim=768):
pass
def forward(self, hidden_states, height, width):
pass
| 3
| 0
| 5
| 1
| 5
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 12
| 2
| 10
| 5
| 7
| 0
| 10
| 5
| 7
| 1
| 1
| 0
| 2
|
2,736
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNDecoder
|
import torch
from torch import nn
class GLPNDecoder(nn.Module):
def __init__(self, config):
super().__init__()
reversed_hidden_sizes = config.hidden_sizes[::-1]
out_channels = config.decoder_hidden_size
self.stages = nn.ModuleList([GLPNDecoderStage(hidden_size, out_channels) for hidden_size in reversed_hidden_sizes])
self.stages[0].fusion = None
self.final_upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
def forward(self, hidden_states: list[torch.Tensor]) -> list[torch.Tensor]:
stage_hidden_states = []
stage_hidden_state = None
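# Walk the reversed feature pyramid from coarsest to finest; each stage fuses
# the current encoder feature with the upsampled running decoder state.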
for hidden_state, stage in zip(hidden_states[::-1], self.stages):
stage_hidden_state = stage(hidden_state, stage_hidden_state)
stage_hidden_states.append(stage_hidden_state)
stage_hidden_states[-1] = self.final_upsample(stage_hidden_state)
return stage_hidden_states
|
class GLPNDecoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: list[torch.Tensor]) -> list[torch.Tensor]:
pass
| 3
| 0
| 12
| 2
| 9
| 1
| 2
| 0.11
| 1
| 4
| 1
| 0
| 2
| 2
| 2
| 12
| 25
| 5
| 18
| 10
| 15
| 2
| 16
| 10
| 13
| 2
| 1
| 1
| 3
|
2,737
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNDecoderStage
|
from torch import nn
class GLPNDecoderStage(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
should_skip = in_channels == out_channels
self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1) if not should_skip else nn.Identity()
self.fusion = GLPNSelectiveFeatureFusion(out_channels)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
def forward(self, hidden_state, residual=None):
hidden_state = self.convolution(hidden_state)
if residual is not None:
hidden_state = self.fusion(hidden_state, residual)
hidden_state = self.upsample(hidden_state)
return hidden_state
|
class GLPNDecoderStage(nn.Module):
def __init__(self, in_channels, out_channels):
pass
def forward(self, hidden_state, residual=None):
pass
| 3
| 0
| 8
| 1
| 7
| 0
| 2
| 0
| 1
| 2
| 1
| 0
| 2
| 3
| 2
| 12
| 18
| 3
| 15
| 7
| 12
| 0
| 15
| 7
| 12
| 2
| 1
| 1
| 4
|
2,738
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNDepthEstimationHead
|
import torch
from torch import nn
class GLPNDepthEstimationHead(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
channels = config.decoder_hidden_size
self.head = nn.Sequential(nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=False), nn.Conv2d(channels, 1, kernel_size=3, stride=1, padding=1))
def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor:
hidden_states = hidden_states[self.config.head_in_index]
hidden_states = self.head(hidden_states)
predicted_depth = torch.sigmoid(hidden_states) * self.config.max_depth
predicted_depth = predicted_depth.squeeze(dim=1)
return predicted_depth
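Because the raw head output passes through a sigmoid before scaling, predictions are bounded to (0, config.max_depth). A one-line check with an illustrative `max_depth` of 10:
```python
import torch

max_depth = 10.0  # illustrative value; the real bound comes from config.max_depth
print(torch.sigmoid(torch.tensor(0.0)) * max_depth)  # tensor(5.) -- a zero logit maps to mid-range depth
```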
|
class GLPNDepthEstimationHead(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor:
pass
| 3
| 0
| 11
| 3
| 8
| 1
| 1
| 0.06
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 23
| 6
| 16
| 7
| 13
| 1
| 12
| 7
| 9
| 1
| 1
| 0
| 2
|
2,739
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNDropPath
|
import torch
from torch import nn
from typing import Optional, Union
class GLPNDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
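The module delegates to a module-level `drop_path` helper defined elsewhere in modeling_glpn.py. Below is a minimal sketch of the standard stochastic-depth formulation it follows (one Bernoulli draw per sample, rescaled by the keep probability so the expected activation is preserved); treat it as illustrative rather than a verbatim copy of the helper:
```python
import torch

def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # identity at inference time or when nothing is dropped
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # one draw per sample, broadcast over all remaining dimensions
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize to {0, 1}
    # rescale surviving samples so the expected value is unchanged
    return input.div(keep_prob) * random_tensor
```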
|
class GLPNDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| 4
| 1
| 2
| 0
| 2
| 0
| 1
| 0.13
| 1
| 4
| 0
| 0
| 3
| 1
| 3
| 13
| 12
| 3
| 8
| 5
| 4
| 1
| 8
| 5
| 4
| 1
| 1
| 0
| 3
|
2,740
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNEfficientSelfAttention
|
from torch import nn
import math
import torch
class GLPNEfficientSelfAttention(nn.Module):
"""SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the [PvT
paper](https://huggingface.co/papers/2102.12122)."""
def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
super().__init__()
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(f'The hidden size ({self.hidden_size}) is not a multiple of the number of attention heads ({self.num_attention_heads})')
self.attention_head_size = int(self.hidden_size / self.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(self.hidden_size, self.all_head_size)
self.key = nn.Linear(self.hidden_size, self.all_head_size)
self.value = nn.Linear(self.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.sr_ratio = sequence_reduction_ratio
if sequence_reduction_ratio > 1:
self.sr = nn.Conv2d(hidden_size, hidden_size, kernel_size=sequence_reduction_ratio, stride=sequence_reduction_ratio)
self.layer_norm = nn.LayerNorm(hidden_size)
def forward(self, hidden_states, height, width, output_attentions=False):
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
if self.sr_ratio > 1:
batch_size, seq_len, num_channels = hidden_states.shape
hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)
hidden_states = self.sr(hidden_states)
hidden_states = hidden_states.reshape(batch_size, num_channels, -1).permute(0, 2, 1)
hidden_states = self.layer_norm(hidden_states)
key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
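The savings come from the `sr` convolution: queries keep all H*W tokens, but keys and values are computed on a grid downsampled by `sr_ratio`, shrinking the attention matrix from N x N to N x N/r^2. A shape check with illustrative sizes (the dummy config is a hypothetical stand-in supplying only the attribute the module reads):
```python
import torch

class DummyConfig:  # hypothetical stand-in for the model config
    attention_probs_dropout_prob = 0.0

height, width, hidden_size = 16, 16, 64
attn = GLPNEfficientSelfAttention(
    DummyConfig(), hidden_size=hidden_size, num_attention_heads=4, sequence_reduction_ratio=4
)
tokens = torch.randn(2, height * width, hidden_size)  # 256 query tokens
(context,) = attn(tokens, height, width)
# keys/values were computed on a 4x4 grid (16 tokens) instead of 16x16 (256 tokens)
print(context.shape)  # torch.Size([2, 256, 64])
```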
|
class GLPNEfficientSelfAttention(nn.Module):
'''SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the [PvT
paper](https://huggingface.co/papers/2102.12122).'''
def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio):
pass
def forward(self, hidden_states, height, width, output_attentions=False):
pass
| 3
| 1
| 24
| 5
| 17
| 2
| 2
| 0.17
| 1
| 3
| 0
| 0
| 3
| 11
| 3
| 13
| 79
| 18
| 52
| 31
| 42
| 9
| 41
| 25
| 37
| 3
| 1
| 1
| 7
|
2,741
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNEncoder
|
from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput
from torch import nn
import torch
class GLPNEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu')]
embeddings = []
for i in range(config.num_encoder_blocks):
embeddings.append(GLPNOverlapPatchEmbeddings(patch_size=config.patch_sizes[i], stride=config.strides[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i]))
self.patch_embeddings = nn.ModuleList(embeddings)
blocks = []
cur = 0
for i in range(config.num_encoder_blocks):
layers = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i]):
layers.append(GLPNLayer(config, hidden_size=config.hidden_sizes[i], num_attention_heads=config.num_attention_heads[i], drop_path=dpr[cur + j], sequence_reduction_ratio=config.sr_ratios[i], mlp_ratio=config.mlp_ratios[i]))
blocks.append(nn.ModuleList(layers))
self.block = nn.ModuleList(blocks)
self.layer_norm = nn.ModuleList([nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)])
def forward(self, pixel_values, output_attentions=False, output_hidden_states=False, return_dict=True):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
batch_size = pixel_values.shape[0]
hidden_states = pixel_values
for idx, x in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)):
embedding_layer, block_layer, norm_layer = x
hidden_states, height, width = embedding_layer(hidden_states)
for i, blk in enumerate(block_layer):
layer_outputs = blk(hidden_states, height, width, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = norm_layer(hidden_states)
hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous()
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
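The `dpr` list implements a linear stochastic-depth schedule: drop rates grow from 0 at the first layer to `config.drop_path_rate` at the last, across all blocks of all stages. With illustrative values:
```python
import torch

# e.g. drop_path_rate = 0.1 over depths = [2, 2] (4 layers in total)
print([round(x.item(), 3) for x in torch.linspace(0, 0.1, 4)])  # [0.0, 0.033, 0.067, 0.1]
```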
|
class GLPNEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, pixel_values, output_attentions=False, output_hidden_states=False, return_dict=True):
pass
| 3
| 0
| 42
| 4
| 34
| 5
| 7
| 0.13
| 1
| 8
| 3
| 0
| 2
| 4
| 2
| 12
| 86
| 9
| 68
| 29
| 59
| 9
| 40
| 23
| 37
| 8
| 1
| 3
| 14
|
2,742
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNForDepthEstimation
|
from ...utils import auto_docstring, logging
import torch
from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput
from typing import Optional, Union
@auto_docstring(custom_intro='\n GLPN Model transformer with a lightweight depth estimation head on top e.g. for KITTI, NYUv2.\n ')
class GLPNForDepthEstimation(GLPNPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.glpn = GLPNModel(config)
self.decoder = GLPNDecoder(config)
self.head = GLPNDepthEstimationHead(config)
self.post_init()
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], DepthEstimatorOutput]:
"""
labels (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth depth estimation maps for computing the loss.
Examples:
```python
>>> from transformers import AutoImageProcessor, GLPNForDepthEstimation
>>> import torch
>>> import numpy as np
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("vinvino02/glpn-kitti")
>>> model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # interpolate to original size
>>> post_processed_output = image_processor.post_process_depth_estimation(
... outputs,
... target_sizes=[(image.height, image.width)],
... )
>>> # visualize the prediction
>>> predicted_depth = post_processed_output[0]["predicted_depth"]
>>> depth = predicted_depth * 255 / predicted_depth.max()
>>> depth = depth.detach().cpu().numpy()
>>> depth = Image.fromarray(depth.astype("uint8"))
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
outputs = self.glpn(pixel_values, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict)
hidden_states = outputs.hidden_states if return_dict else outputs[1]
out = self.decoder(hidden_states)
predicted_depth = self.head(out)
loss = None
if labels is not None:
loss_fct = SiLogLoss()
loss = loss_fct(predicted_depth, labels)
if not return_dict:
if output_hidden_states:
output = (predicted_depth,) + outputs[1:]
else:
output = (predicted_depth,) + outputs[2:]
return (loss,) + output if loss is not None else output
return DepthEstimatorOutput(loss=loss, predicted_depth=predicted_depth, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n GLPN Model transformer with a lightweight depth estimation head on top e.g. for KITTI, NYUv2.\n ')
class GLPNForDepthEstimation(GLPNPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], DepthEstimatorOutput]:
'''
labels (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth depth estimation maps for computing the loss.
Examples:
```python
>>> from transformers import AutoImageProcessor, GLPNForDepthEstimation
>>> import torch
>>> import numpy as np
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("vinvino02/glpn-kitti")
>>> model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # interpolate to original size
>>> post_processed_output = image_processor.post_process_depth_estimation(
... outputs,
... target_sizes=[(image.height, image.width)],
... )
>>> # visualize the prediction
>>> predicted_depth = post_processed_output[0]["predicted_depth"]
>>> depth = predicted_depth * 255 / predicted_depth.max()
>>> depth = depth.detach().cpu().numpy()
>>> depth = Image.fromarray(depth.astype("uint8"))
```'''
pass
| 5
| 1
| 46
| 9
| 22
| 16
| 5
| 0.7
| 1
| 8
| 5
| 0
| 2
| 3
| 2
| 3
| 95
| 18
| 46
| 21
| 34
| 32
| 24
| 13
| 21
| 9
| 2
| 2
| 10
|
2,743
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNLayer
|
from torch import nn
class GLPNLayer(nn.Module):
"""This corresponds to the Block class in the original implementation."""
def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
super().__init__()
self.layer_norm_1 = nn.LayerNorm(hidden_size)
self.attention = GLPNAttention(config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio)
self.drop_path = GLPNDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.layer_norm_2 = nn.LayerNorm(hidden_size)
mlp_hidden_size = int(hidden_size * mlp_ratio)
self.mlp = GLPNMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size)
def forward(self, hidden_states, height, width, output_attentions=False):
self_attention_outputs = self.attention(self.layer_norm_1(hidden_states), height, width, output_attentions=output_attentions)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
attention_output = self.drop_path(attention_output)
hidden_states = attention_output + hidden_states
mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width)
mlp_output = self.drop_path(mlp_output)
layer_output = mlp_output + hidden_states
outputs = (layer_output,) + outputs
return outputs
|
class GLPNLayer(nn.Module):
'''This corresponds to the Block class in the original implementation.'''
def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
pass
def forward(self, hidden_states, height, width, output_attentions=False):
pass
| 3
| 1
| 19
| 3
| 15
| 2
| 2
| 0.17
| 1
| 5
| 3
| 0
| 2
| 5
| 2
| 12
| 41
| 8
| 30
| 14
| 27
| 5
| 20
| 14
| 17
| 2
| 1
| 0
| 3
|
2,744
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNMixFFN
|
from torch import nn
from ...activations import ACT2FN
class GLPNMixFFN(nn.Module):
def __init__(self, config, in_features, hidden_features=None, out_features=None):
super().__init__()
out_features = out_features or in_features
self.dense1 = nn.Linear(in_features, hidden_features)
self.dwconv = GLPNDWConv(hidden_features)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.dense2 = nn.Linear(hidden_features, out_features)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, height, width):
hidden_states = self.dense1(hidden_states)
hidden_states = self.dwconv(hidden_states, height, width)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense2(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class GLPNMixFFN(nn.Module):
def __init__(self, config, in_features, hidden_features=None, out_features=None):
pass
def forward(self, hidden_states, height, width):
pass
| 3
| 0
| 10
| 0
| 10
| 0
| 2
| 0
| 1
| 3
| 1
| 0
| 2
| 5
| 2
| 12
| 21
| 1
| 20
| 8
| 17
| 0
| 19
| 8
| 16
| 2
| 1
| 1
| 3
|
2,745
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNModel
|
from typing import Optional, Union
from ...utils import auto_docstring, logging
import torch
from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput
@auto_docstring
class GLPNModel(GLPNPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.encoder = GLPNEncoder(config)
self.post_init()
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_outputs = self.encoder(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring
class GLPNModel(GLPNPreTrainedModel):
def __init__(self, config):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
'''
pass
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
pass
| 6
| 1
| 15
| 2
| 11
| 2
| 3
| 0.19
| 1
| 4
| 2
| 0
| 3
| 2
| 3
| 4
| 58
| 7
| 43
| 16
| 25
| 8
| 18
| 9
| 14
| 5
| 2
| 1
| 8
|
2,746
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNOverlapPatchEmbeddings
|
from torch import nn
class GLPNOverlapPatchEmbeddings(nn.Module):
"""Construct the overlapping patch embeddings."""
def __init__(self, patch_size, stride, num_channels, hidden_size):
super().__init__()
self.proj = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=patch_size // 2)
self.layer_norm = nn.LayerNorm(hidden_size)
def forward(self, pixel_values):
embeddings = self.proj(pixel_values)
_, _, height, width = embeddings.shape
embeddings = embeddings.flatten(2).transpose(1, 2)
embeddings = self.layer_norm(embeddings)
return (embeddings, height, width)
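Because `padding = patch_size // 2`, the output grid follows the usual convolution arithmetic, `H_out = (H + 2 * (patch_size // 2) - patch_size) // stride + 1`, and neighbouring patches overlap whenever `stride < patch_size`. A shape check with an illustrative first-stage configuration (7x7 patches, stride 4):
```python
import torch

embed = GLPNOverlapPatchEmbeddings(patch_size=7, stride=4, num_channels=3, hidden_size=32)
pixel_values = torch.randn(1, 3, 224, 224)
embeddings, height, width = embed(pixel_values)
# H_out = (224 + 2*3 - 7) // 4 + 1 = 56, so 56*56 = 3136 tokens
print(embeddings.shape, height, width)  # torch.Size([1, 3136, 32]) 56 56
```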
|
class GLPNOverlapPatchEmbeddings(nn.Module):
'''Construct the overlapping patch embeddings.'''
def __init__(self, patch_size, stride, num_channels, hidden_size):
pass
def forward(self, pixel_values):
pass
| 3
| 1
| 10
| 1
| 8
| 1
| 1
| 0.18
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 23
| 3
| 17
| 7
| 14
| 3
| 11
| 7
| 8
| 1
| 1
| 0
| 2
|
2,747
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNPreTrainedModel
|
from torch import nn
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from .configuration_glpn import GLPNConfig
@auto_docstring
class GLPNPreTrainedModel(PreTrainedModel):
config: GLPNConfig
base_model_prefix = 'glpn'
main_input_name = 'pixel_values'
_no_split_modules = []
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class GLPNPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 15
| 0
| 12
| 3
| 6
| 0.47
| 1
| 0
| 0
| 2
| 1
| 0
| 1
| 1
| 27
| 2
| 17
| 6
| 15
| 8
| 15
| 6
| 13
| 6
| 1
| 2
| 6
|
2,748
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNSelectiveFeatureFusion
|
import torch
from torch import nn
class GLPNSelectiveFeatureFusion(nn.Module):
"""
Selective Feature Fusion module, as explained in the [paper](https://huggingface.co/papers/2201.07436) (section 3.4). This
module adaptively selects and integrates local and global features by attaining an attention map for each feature.
"""
def __init__(self, in_channel=64):
super().__init__()
self.convolutional_layer1 = nn.Sequential(nn.Conv2d(in_channels=int(in_channel * 2), out_channels=in_channel, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(in_channel), nn.ReLU())
self.convolutional_layer2 = nn.Sequential(nn.Conv2d(in_channels=in_channel, out_channels=int(in_channel / 2), kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(int(in_channel / 2)), nn.ReLU())
self.convolutional_layer3 = nn.Conv2d(in_channels=int(in_channel / 2), out_channels=2, kernel_size=3, stride=1, padding=1)
self.sigmoid = nn.Sigmoid()
def forward(self, local_features, global_features):
features = torch.cat((local_features, global_features), dim=1)
features = self.convolutional_layer1(features)
features = self.convolutional_layer2(features)
features = self.convolutional_layer3(features)
attn = self.sigmoid(features)
hybrid_features = local_features * attn[:, 0, :, :].unsqueeze(1) + global_features * attn[:, 1, :, :].unsqueeze(1)
return hybrid_features
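The three convolutions collapse the concatenated features into a two-channel map whose sigmoid gives per-pixel mixing weights for the local and global branches. A quick shape check on dummy feature maps:
```python
import torch

fusion = GLPNSelectiveFeatureFusion(in_channel=64)
local_features = torch.randn(1, 64, 32, 32)
global_features = torch.randn(1, 64, 32, 32)
fused = fusion(local_features, global_features)
# channel 0 of the sigmoid map weights the local branch, channel 1 the global branch
print(fused.shape)  # torch.Size([1, 64, 32, 32])
```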
|
class GLPNSelectiveFeatureFusion(nn.Module):
'''
Selective Feature Fusion module, as explained in the [paper](https://huggingface.co/papers/2201.07436) (section 3.4). This
module adaptively selects and integrates local and global features by attaining an attention map for each feature.
'''
def __init__(self, in_channel=64):
pass
def forward(self, local_features, global_features):
pass
| 3
| 1
| 18
| 3
| 13
| 2
| 1
| 0.3
| 1
| 2
| 0
| 0
| 2
| 4
| 2
| 12
| 42
| 7
| 27
| 10
| 24
| 8
| 15
| 10
| 12
| 1
| 1
| 0
| 2
|
2,749
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.GLPNSelfOutput
|
from torch import nn
class GLPNSelfOutput(nn.Module):
def __init__(self, config, hidden_size):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class GLPNSelfOutput(nn.Module):
def __init__(self, config, hidden_size):
pass
def forward(self, hidden_states, input_tensor):
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 10
| 1
| 9
| 5
| 6
| 0
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
2,750
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/glpn/modeling_glpn.py
|
transformers.models.glpn.modeling_glpn.SiLogLoss
|
import torch
from torch import nn
class SiLogLoss(nn.Module):
"""
Implements the Scale-invariant log scale loss [Eigen et al., 2014](https://huggingface.co/papers/1406.2283).
$$L=\\frac{1}{n} \\sum_{i} d_{i}^{2}-\\frac{1}{2 n^{2}}\\left(\\sum_{i} d_{i}\\right)^{2}$$ where $d_{i}=\\log y_{i}-\\log
y_{i}^{*}$.
"""
def __init__(self, lambd=0.5):
super().__init__()
self.lambd = lambd
def forward(self, pred, target):
valid_mask = (target > 0).detach()
diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask])
loss = torch.sqrt(torch.pow(diff_log, 2).mean() - self.lambd * torch.pow(diff_log.mean(), 2))
return loss
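Note that `valid_mask` drops non-positive target pixels, the usual convention for missing ground-truth depth. A small worked example on synthetic values:
```python
import torch

loss_fct = SiLogLoss(lambd=0.5)
pred = torch.tensor([[2.0, 4.0], [1.0, 8.0]])
target = torch.tensor([[2.0, 0.0], [1.0, 4.0]])  # the 0.0 pixel is masked out
# over the 3 valid pixels: d = [0, 0, log(4) - log(8)] = [0, 0, -log 2]
# loss = sqrt(mean(d^2) - 0.5 * mean(d)^2) = sqrt(log(2)^2 / 3 - 0.5 * (log(2) / 3)^2)
print(loss_fct(pred, target))  # tensor(0.3653)
```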
|
class SiLogLoss(nn.Module):
'''
Implements the Scale-invariant log scale loss [Eigen et al., 2014](https://huggingface.co/papers/1406.2283).
$$L=\frac{1}{n} \sum_{i} d_{i}^{2}-\frac{1}{2 n^{2}}\left(\sum_{i} d_{i}\right)^{2}$$ where $d_{i}=\log y_{i}-\log
y_{i}^{*}$.
'''
def __init__(self, lambd=0.5):
pass
def forward(self, pred, target):
pass
| 3
| 1
| 5
| 1
| 4
| 0
| 1
| 0.56
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 19
| 5
| 9
| 7
| 6
| 5
| 9
| 7
| 6
| 1
| 1
| 0
| 2
|
2,751
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/convert_got_ocr2_weights_to_hf.py
|
transformers.models.got_ocr2.convert_got_ocr2_weights_to_hf.GotOcr2Converter
|
from transformers import GotOcr2Config, GotOcr2ForConditionalGeneration, GotOcr2ImageProcessor, GotOcr2Processor, PreTrainedTokenizerFast, is_vision_available
from transformers.convert_slow_tokenizer import TikTokenConverter
from typing import Optional
class GotOcr2Converter(TikTokenConverter):
def __init__(self, vocab_file, special_tokens: list[str], pattern: str, model_max_length: int, chat_template: Optional[str]=None, **kwargs):
super().__init__(vocab_file, pattern=pattern)
self.additional_special_tokens = special_tokens
tokenizer = self.converted()
if chat_template is not None:
kwargs['chat_template'] = chat_template
self.tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer, model_input_names=['input_ids', 'attention_mask'], model_max_length=model_max_length, **kwargs)
|
class GotOcr2Converter(TikTokenConverter):
def __init__(self, vocab_file, special_tokens: list[str], pattern: str, model_max_length: int, chat_template: Optional[str]=None, **kwargs):
pass
| 2
| 0
| 20
| 0
| 20
| 0
| 2
| 0
| 1
| 4
| 0
| 0
| 1
| 2
| 1
| 5
| 21
| 0
| 21
| 13
| 11
| 0
| 8
| 5
| 6
| 2
| 1
| 1
| 2
|
2,752
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/image_processing_got_ocr2.py
|
transformers.models.got_ocr2.image_processing_got_ocr2.GotOcr2ImageProcessor
|
from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
from typing import Optional, Union
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
class GotOcr2ImageProcessor(BaseImageProcessor):
"""
Constructs a GOT_OCR2 image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
crop_to_patches (`bool`, *optional*, defaults to `False`):
Whether to crop the image to patches. Can be overridden by the `crop_to_patches` parameter in the
`preprocess` method.
min_patches (`int`, *optional*, defaults to 1):
The minimum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`. Can be overridden by the `min_patches` parameter in the `preprocess` method.
max_patches (`int`, *optional*, defaults to 12):
The maximum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`. Can be overridden by the `max_patches` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, crop_to_patches: bool=False, min_patches: int=1, max_patches: int=12, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 384, 'width': 384}
size = get_size_dict(size, default_to_square=True)
self.do_resize = do_resize
self.size = size
self.crop_to_patches = crop_to_patches
self.min_patches = min_patches
self.max_patches = max_patches
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_convert_rgb = do_convert_rgb
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if 'height' not in size or 'width' not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
output_size = (size['height'], size['width'])
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, crop_to_patches: Optional[bool]=None, min_patches: Optional[int]=None, max_patches: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, do_convert_rgb: Optional[bool]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Controls the size of the image after `resize`. The image is resized to `(size["height"],
size["width"])`, which is also the size of each patch when `crop_to_patches` is enabled.
crop_to_patches (`bool`, *optional*, defaults to `self.crop_to_patches`):
Whether to crop the image to patches.
min_patches (`int`, *optional*, defaults to `self.min_patches`):
The minimum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`.
max_patches (`int`, *optional*, defaults to `self.max_patches`):
The maximum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the image by if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
crop_to_patches = crop_to_patches if crop_to_patches is not None else self.crop_to_patches
min_patches = min_patches if min_patches is not None else self.min_patches
max_patches = max_patches if max_patches is not None else self.max_patches
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if crop_to_patches and max_patches > 1:
images = [self.crop_image_to_patches(image, min_patches=min_patches, max_patches=max_patches, patch_size=size, data_format=input_data_format) for image in images]
num_patches = np.array([len(image) for image in images])
images = [image for images_list in images for image in images_list]
else:
num_patches = np.array([1] * len(images))
for i, image in enumerate(images):
if do_resize:
images[i] = self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
if do_rescale:
images[i] = self.rescale(image=images[i], scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
images[i] = self.normalize(image=images[i], mean=image_mean, std=image_std, input_data_format=input_data_format)
images[i] = to_channel_dimension_format(images[i], data_format, input_channel_dim=input_data_format)
encoded_outputs = BatchFeature(data={'pixel_values': images, 'num_patches': num_patches}, tensor_type=return_tensors)
return encoded_outputs
def crop_image_to_patches(self, images: np.ndarray, min_patches: int, max_patches: int, use_thumbnail: bool=True, patch_size: Optional[Union[tuple, int, dict]]=None, data_format: Optional[ChannelDimension]=None):
"""
Crop the image to patches and return a list of cropped images.
The number of patches and their grid arrangement are determined by the original image size,
the target patch size and the minimum and maximum number of patches.
The aspect ratio of the patches grid is chosen to be the closest to the original image aspect ratio.
Args:
images (`np.ndarray`):
The image to be cropped.
min_patches (`int`):
The minimum number of patches to be extracted from the image.
max_patches (`int`):
The maximum number of patches to be extracted from the image.
use_thumbnail (`bool`, *optional*, defaults to `True`):
Whether to add a thumbnail image to the list of cropped patches.
patch_size (`int`, `tuple[int, int]`, `dict`, *optional*):
The size of the output patches.
data_format (`ChannelDimension`, *optional*):
The format of the image data. If `None`, the format is inferred from the input image.
Returns:
list[`PIL.Image.Image`] or list[np.ndarray]: The list of cropped images.
"""
if data_format is None:
data_format = infer_channel_dimension_format(images)
images = to_channel_dimension_format(images, ChannelDimension.FIRST, data_format)
patch_size_height, patch_size_width = (patch_size['height'], patch_size['width'])
original_height, original_width = images.shape[-2:]
num_columns, num_rows = get_optimal_tiled_canvas((original_height, original_width), (patch_size_height, patch_size_width), min_patches, max_patches)
target_width = patch_size_width * num_columns
target_height = patch_size_height * num_rows
num_blocks = num_columns * num_rows
resized_image = self.resize(images, {'height': target_height, 'width': target_width}, data_format=ChannelDimension.FIRST, input_data_format=ChannelDimension.FIRST)
processed_images = []
for i in range(num_blocks):
column = i % num_columns
row = i // num_columns
box = (column * patch_size_width, row * patch_size_height, (column + 1) * patch_size_width, (row + 1) * patch_size_height)
patch_image = resized_image[..., box[1]:box[3], box[0]:box[2]]
patch_image = to_channel_dimension_format(patch_image, data_format, ChannelDimension.FIRST)
processed_images.append(patch_image)
if use_thumbnail and len(processed_images) != 1:
thumbnail_img = self.resize(images, patch_size, data_format=data_format, input_data_format=ChannelDimension.FIRST)
processed_images.append(thumbnail_img)
return processed_images
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
"""
A utility that returns the number of patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
images_kwargs (`dict`, *optional*)
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of patches per image.
"""
min_patches = images_kwargs.get('min_patches', self.min_patches)
max_patches = images_kwargs.get('max_patches', self.max_patches)
patch_size = images_kwargs.get('patch_size', self.size)
crop_to_patches = images_kwargs.get('crop_to_patches', self.crop_to_patches)
num_patches = 1
if crop_to_patches and max_patches > 1:
num_columns, num_rows = get_optimal_tiled_canvas((height, width), (patch_size['height'], patch_size['width']), min_patches, max_patches)
if num_columns * num_rows > 1:
num_patches += num_columns * num_rows
return num_patches
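Both `crop_image_to_patches` and `get_number_of_image_patches` lean on `get_optimal_tiled_canvas`, a helper defined elsewhere in the library that picks the patch grid whose aspect ratio best matches the input image. The exact tie-breaking rules live in that helper; the sketch below is an assumption-labelled re-implementation of the core idea only, not the library function:
```python
import math

def get_optimal_tiled_canvas_sketch(image_size, patch_size, min_patches, max_patches):
    """Illustrative only: choose the (columns, rows) grid, with an allowed patch
    count, whose canvas aspect ratio is closest to the input image's."""
    original_height, original_width = image_size
    patch_height, patch_width = patch_size
    target_ratio = original_width / original_height
    best, best_diff = (1, 1), math.inf
    for columns in range(1, max_patches + 1):
        for rows in range(1, max_patches + 1):
            if not min_patches <= columns * rows <= max_patches:
                continue
            diff = abs((columns * patch_width) / (rows * patch_height) - target_ratio)
            if diff < best_diff:
                best, best_diff = (columns, rows), diff
    return best

# a wide 384x1536 page with 384x384 patches favours a 4x1 grid
print(get_optimal_tiled_canvas_sketch((384, 1536), (384, 384), 1, 12))  # (4, 1)
```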
|
class GotOcr2ImageProcessor(BaseImageProcessor):
'''
Constructs a GOT_OCR2 image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
crop_to_patches (`bool`, *optional*, defaults to `False`):
Whether to crop the image to patches. Can be overridden by the `crop_to_patches` parameter in the
`preprocess` method.
min_patches (`int`, *optional*, defaults to 1):
The minimum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`. Can be overridden by the `min_patches` parameter in the `preprocess` method.
max_patches (`int`, *optional*, defaults to 12):
The maximum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`. Can be overridden by the `max_patches` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, crop_to_patches: bool=False, min_patches: int=1, max_patches: int=12, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, crop_to_patches: Optional[bool]=None, min_patches: Optional[int]=None, max_patches: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, do_convert_rgb: Optional[bool]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Controls the size of the image after `resize`. The image is resized to `(size["height"],
size["width"])`, which is also the size of each patch when `crop_to_patches` is enabled.
crop_to_patches (`bool`, *optional*, defaults to `self.crop_to_patches`):
Whether to crop the image to patches.
min_patches (`int`, *optional*, defaults to `self.min_patches`):
The minimum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`.
max_patches (`int`, *optional*, defaults to `self.max_patches`):
The maximum number of patches to be extracted from the image. Only has an effect if `crop_to_patches` is
set to `True`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the image by if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def crop_image_to_patches(self, images: np.ndarray, min_patches: int, max_patches: int, use_thumbnail: bool=True, patch_size: Optional[Union[tuple, int, dict]]=None, data_format: Optional[ChannelDimension]=None):
'''
Crop the image to patches and return a list of cropped images.
The number of patches and their grid arrangement are determined by the original image size,
the target patch size and the minimum and maximum number of patches.
The aspect ratio of the patches grid is chosen to be the closest to the original image aspect ratio.
Args:
images (`np.ndarray`):
The image to be cropped.
min_patches (`int`):
The minimum number of patches to be extracted from the image.
max_patches (`int`):
The maximum number of patches to be extracted from the image.
use_thumbnail (`bool`, *optional*, defaults to `True`):
Whether to add a thumbnail image to the list of cropped patches.
patch_size (`int`, `tuple[int, int]`, `dict`, *optional*):
The size of the output patches.
data_format (`ChannelDimension`, *optional*):
The format of the image data. If `None`, the format is inferred from the input image.
Returns:
list[`PIL.Image.Image`] or list[np.ndarray]: The list of cropped images.
'''
pass
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
'''
A utility that returns the number of patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
images_kwargs (`dict`, *optional*)
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of patches per image.
'''
pass
| 7
| 5
| 76
| 6
| 44
| 27
| 8
| 0.78
| 1
| 10
| 2
| 0
| 4
| 9
| 4
| 24
| 346
| 31
| 177
| 80
| 127
| 138
| 88
| 35
| 83
| 17
| 3
| 3
| 33
|
2,753
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modeling_got_ocr2.py
|
transformers.models.got_ocr2.modeling_got_ocr2.GotOcr2CausalLMOutputWithPast
|
import torch.nn.functional as F
import torch.nn as nn
from dataclasses import dataclass
from typing import Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput
import torch
from ...cache_utils import Cache
@dataclass
@auto_docstring(custom_intro='\n Base class for GotOcr2 causal language model (or autoregressive) outputs.\n ')
class GotOcr2CausalLMOutputWithPast(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
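A quick hedged illustration (dummy tensor; assumes the dataclass above is in scope) of how `ModelOutput` subclasses like this one can be read by attribute, by key, or converted to a tuple of their non-`None` fields:
```python
import torch

out = GotOcr2CausalLMOutputWithPast(logits=torch.zeros(1, 4, 10))
print(out.logits.shape)      # attribute access: torch.Size([1, 4, 10])
print(out["logits"].shape)   # dict-style access works too
print(len(out.to_tuple()))   # only the non-None field is kept -> 1
```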
|
@dataclass
@auto_docstring(custom_intro='\n Base class for GotOcr2 causal language model (or autoregressive) outputs.\n ')
class GotOcr2CausalLMOutputWithPast(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.57 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 5 | 7 | 7 | 6 | 25 | 7 | 7 | 6 | 0 | 1 | 0 | 0 |
2,754 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modeling_got_ocr2.py | transformers.models.got_ocr2.modeling_got_ocr2.GotOcr2ForConditionalGeneration |
import torch.nn.functional as F
from typing import Optional, Union
from .configuration_got_ocr2 import GotOcr2Config, GotOcr2VisionConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
import torch
from ...processing_utils import Unpack
import torch.nn as nn
from ...cache_utils import Cache
from ...generation import GenerationMixin
@auto_docstring(custom_intro='\n The GOT_OCR2 model which consists of a vision backbone and a language model.\n ')
class GotOcr2ForConditionalGeneration(GotOcr2PreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {'^language_model.model': 'model.language_model', '^vision_tower': 'model.vision_tower', '^multi_modal_projector': 'model.multi_modal_projector', '^language_model.lm_head': 'lm_head'}
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config: GotOcr2Config):
super().__init__(config)
self.model = GotOcr2Model(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def set_decoder(self, decoder):
self.model.set_decoder(decoder)
def get_decoder(self):
return self.model.get_decoder()
def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None, **kwargs):
return self.model.get_image_features(pixel_values=pixel_values, vision_feature_layer=vision_feature_layer, vision_feature_select_strategy=vision_feature_select_strategy, **kwargs)
@property
def language_model(self):
return self.model.language_model
@property
def vision_tower(self):
return self.model.vision_tower
@property
def multi_modal_projector(self):
return self.model.multi_modal_projector
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, GotOcr2CausalLMOutputWithPast]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, GotOcr2ForConditionalGeneration, TextStreamer
>>> model = GotOcr2ForConditionalGeneration.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf").to("cuda")
>>> processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf")
>>> url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/multi_box.png"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(image, return_tensors="pt", color="green").to("cuda")
>>> # Generate
>>> streamer = TextStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
>>> generate_ids = model.generate(
... **inputs,
... do_sample=False,
... tokenizer = processor.tokenizer,
... stop_strings='<|im_end|>',
... streamer=streamer,
... max_new_tokens=4096,
... )
"You should keep in mind what features from the module should be used, especially
when you're planning to sell a template."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs)
hidden_states = outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)
return GotOcr2CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs):
model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs)
if cache_position[0] == 0:
model_inputs['pixel_values'] = pixel_values
return model_inputs
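A small sketch of the `logits_to_keep` handling in `forward` above, using a dummy hidden-state tensor: an `int` keeps only the last N positions before the LM head (0 keeps all, since `slice(-0, None)` is `slice(0, None)`), while a tensor selects explicit indices:
```python
import torch

hidden_states = torch.randn(2, 7, 16)  # (batch, seq_len, hidden)

logits_to_keep = 1  # typical during generation: only the final position
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
print(hidden_states[:, slice_indices, :].shape)  # torch.Size([2, 1, 16])

logits_to_keep = torch.tensor([0, 6])  # explicit positions instead
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
print(hidden_states[:, slice_indices, :].shape)  # torch.Size([2, 2, 16])
```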
|
@auto_docstring(custom_intro='\n The GOT_OCR2 model which consists of a vision backbone and a language model.\n ')
class GotOcr2ForConditionalGeneration(GotOcr2PreTrainedModel, GenerationMixin):
def __init__(self, config: GotOcr2Config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def get_output_embeddings(self) -> nn.Module:
pass
def set_decoder(self, decoder):
pass
def get_decoder(self):
pass
def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layer: Optional[Union[int, list[int]]]=None, vision_feature_select_strategy: Optional[str]=None, **kwargs):
pass
@property
def language_model(self):
pass
@property
def vision_tower(self):
pass
@property
def multi_modal_projector(self):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, GotOcr2CausalLMOutputWithPast]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, GotOcr2ForConditionalGeneration, TextStreamer
>>> model = GotOcr2ForConditionalGeneration.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf").to("cuda")
>>> processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf")
>>> url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/multi_box.png"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(image, return_tensors="pt", color="green").to("cuda")
>>> # Generate
>>> streamer = TextStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
>>> generate_ids = model.generate(
... **inputs,
... do_sample=False,
... tokenizer = processor.tokenizer,
... stop_strings='<|im_end|>',
... streamer=streamer,
... max_new_tokens=4096,
... )
"You should keep in mind what features from the module should be used, especially
when you're planning to sell a template."
```'''
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, attention_mask=None, cache_position=None, logits_to_keep=None, **kwargs):
pass
| 19 | 1 | 26 | 3 | 17 | 6 | 3 | 0.34 | 2 | 10 | 5 | 0 | 11 | 6 | 11 | 12 | 301 | 47 | 190 | 81 | 148 | 65 | 109 | 52 | 97 | 14 | 2 | 2 | 32 |
2,755 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modeling_got_ocr2.py | transformers.models.got_ocr2.modeling_got_ocr2.GotOcr2LayerNorm |
import torch
import torch.nn.functional as F
import torch.nn as nn
class GotOcr2LayerNorm(nn.LayerNorm):
"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, *, eps=1e-06, data_format='channels_last', **kwargs):
super().__init__(normalized_shape, eps=eps, **kwargs)
if data_format not in ['channels_last', 'channels_first']:
raise NotImplementedError(f'Unsupported data format: {data_format}')
self.data_format = data_format
def forward(self, features: torch.Tensor) -> torch.Tensor:
"""
Args:
features: Tensor of shape (batch_size, channels, height, width) OR (batch_size, height, width, channels)
"""
if self.data_format == 'channels_first':
features = features.permute(0, 2, 3, 1)
features = super().forward(features)
features = features.permute(0, 3, 1, 2)
else:
features = super().forward(features)
return features
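A quick equivalence check (dummy data) for the `channels_first` branch above: it matches normalizing in channels-last layout and permuting back. `GotOcr2LayerNorm` is assumed to be in scope as defined above:
```python
import torch
import torch.nn.functional as F

ln = GotOcr2LayerNorm(8, data_format="channels_first")
x = torch.randn(2, 8, 4, 4)  # (batch, channels, height, width)

manual = F.layer_norm(x.permute(0, 2, 3, 1), (8,), ln.weight, ln.bias, ln.eps).permute(0, 3, 1, 2)
print(torch.allclose(ln(x), manual))  # True
```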
|
class GotOcr2LayerNorm(nn.LayerNorm):
'''LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
'''
def __init__(self, normalized_shape, *, eps=1e-06, data_format='channels_last', **kwargs):
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
'''
Args:
features: Tensor of shape (batch_size, channels, height, width) OR (batch_size, height, width, channels)
'''
pass
| 3 | 2 | 11 | 0 | 11 | 0 | 3 | 0.18 | 1 | 3 | 0 | 0 | 2 | 5 | 2 | 12 | 28 | 2 | 22 | 11 | 19 | 4 | 21 | 11 | 18 | 3 | 1 | 1 | 5 |
2,756 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modeling_got_ocr2.py | transformers.models.got_ocr2.modeling_got_ocr2.GotOcr2MLPBlock |
from ...activations import ACT2FN
import torch.nn as nn
import torch.nn.functional as F
import torch
class GotOcr2MLPBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.lin1 = nn.Linear(config.hidden_size, config.mlp_dim)
self.lin2 = nn.Linear(config.mlp_dim, config.hidden_size)
self.act = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.lin1(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.lin2(hidden_states)
return hidden_states
|
class GotOcr2MLPBlock(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
2,757 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modeling_got_ocr2.py | transformers.models.got_ocr2.modeling_got_ocr2.GotOcr2MultiModalProjector |
import torch
import torch.nn.functional as F
from .configuration_got_ocr2 import GotOcr2Config, GotOcr2VisionConfig
import torch.nn as nn
class GotOcr2MultiModalProjector(nn.Module):
def __init__(self, config: GotOcr2Config):
super().__init__()
vision_output_channels = config.vision_config.output_channels
language_hidden_size = config.text_config.hidden_size
self.conv_upsampler1 = nn.Conv2d(vision_output_channels, vision_output_channels * 2, kernel_size=3, stride=2, padding=1, bias=False)
self.conv_upsampler2 = nn.Conv2d(vision_output_channels * 2, language_hidden_size, kernel_size=3, stride=2, padding=1, bias=False)
self.multimodal_projector = nn.Linear(language_hidden_size, language_hidden_size)
def forward(self, vision_embeddings: torch.Tensor) -> torch.Tensor:
hidden_state = self.conv_upsampler1(vision_embeddings)
hidden_state = self.conv_upsampler2(hidden_state)
hidden_state = hidden_state.flatten(2).permute(0, 2, 1)
hidden_state = self.multimodal_projector(hidden_state)
return hidden_state
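A shape walk-through for the projector above with dummy sizes (256 vision output channels and a 1024-dim language hidden size, matching the defaults elsewhere in this file). Each stride-2, kernel-3, padding-1 conv halves the spatial grid: floor((64 + 2 - 3) / 2) + 1 = 32, then 16, so a 64x64 feature map flattens to 16 * 16 = 256 language tokens:
```python
import torch
import torch.nn as nn

conv1 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False)
conv2 = nn.Conv2d(512, 1024, kernel_size=3, stride=2, padding=1, bias=False)

x = torch.randn(1, 256, 64, 64)             # (batch, vision channels, H, W)
y = conv2(conv1(x))
print(y.shape)                              # torch.Size([1, 1024, 16, 16])
print(y.flatten(2).permute(0, 2, 1).shape)  # torch.Size([1, 256, 1024]) -> 256 tokens
```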
|
class GotOcr2MultiModalProjector(nn.Module):
def __init__(self, config: GotOcr2Config):
pass
def forward(self, vision_embeddings: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 9 | 0 | 9 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 2 | 3 | 2 | 12 | 19 | 1 | 18 | 9 | 15 | 0 | 14 | 9 | 11 | 1 | 1 | 0 | 2 |
2,758 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modeling_got_ocr2.py | transformers.models.got_ocr2.modeling_got_ocr2.GotOcr2PatchEmbeddings |
import collections
import torch.nn as nn
class GotOcr2PatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = (config.image_size, config.patch_size)
num_channels, hidden_size = (config.num_channels, config.hidden_size)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values):
batch_size, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError('Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
if height != self.image_size[0] or width != self.image_size[1]:
raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).")
embeddings = self.projection(pixel_values).permute(0, 2, 3, 1)
return embeddings
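A dummy forward pass through the patch projection above with the SAM-style defaults used by this model (image_size=1024, patch_size=16): the strided conv yields a 64x64 grid, i.e. num_patches = (1024 // 16) ** 2 = 4096 patch embeddings:
```python
import torch
import torch.nn as nn

proj = nn.Conv2d(3, 768, kernel_size=16, stride=16)  # num_channels -> hidden_size
pixel_values = torch.randn(1, 3, 1024, 1024)
embeddings = proj(pixel_values).permute(0, 2, 3, 1)  # to (batch, H, W, hidden)
print(embeddings.shape)  # torch.Size([1, 64, 64, 768])
```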
|
class GotOcr2PatchEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config):
pass
def forward(self, pixel_values):
pass
| 3 | 1 | 13 | 1 | 12 | 0 | 3 | 0.2 | 1 | 2 | 0 | 0 | 2 | 5 | 2 | 12 | 33 | 3 | 25 | 13 | 22 | 5 | 21 | 13 | 18 | 3 | 1 | 1 | 6 |
2,759 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modeling_got_ocr2.py | transformers.models.got_ocr2.modeling_got_ocr2.GotOcr2PreTrainedModel |
from .configuration_got_ocr2 import GotOcr2Config, GotOcr2VisionConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...modeling_utils import PreTrainedModel
@auto_docstring
class GotOcr2PreTrainedModel(PreTrainedModel):
config: GotOcr2Config
base_model_prefix = ''
supports_gradient_checkpointing = True
_skip_keys_device_placement = 'past_key_values'
_supports_flash_attn = False
_supports_sdpa = False
_can_compile_fullgraph = True
_supports_flex_attn = False
_supports_attention_backend = True
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, GotOcr2VisionAttention):
if module.use_rel_pos:
module.rel_pos_h.data.zero_()
module.rel_pos_w.data.zero_()
elif isinstance(module, GotOcr2VisionEncoder):
if module.pos_embed is not None:
module.pos_embed.data.zero_()
|
@auto_docstring
class GotOcr2PreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3 | 0 | 21 | 2 | 16 | 3 | 7 | 0.12 | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 31 | 3 | 25 | 11 | 23 | 3 | 20 | 11 | 18 | 7 | 1 | 2 | 7 |
2,760 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modeling_got_ocr2.py | transformers.models.got_ocr2.modeling_got_ocr2.GotOcr2VisionAttention |
import torch.nn.functional as F
import torch
import torch.nn as nn
class GotOcr2VisionAttention(nn.Module):
"""Multi-head Attention block with relative position embeddings."""
def __init__(self, config, window_size):
super().__init__()
input_size = (config.image_size // config.patch_size, config.image_size // config.patch_size) if window_size == 0 else (window_size, window_size)
self.num_attention_heads = config.num_attention_heads
head_dim = config.hidden_size // config.num_attention_heads
self.scale = head_dim ** (-0.5)
self.dropout = config.attention_dropout
self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias)
self.proj = nn.Linear(config.hidden_size, config.hidden_size)
self.use_rel_pos = config.use_rel_pos
if self.use_rel_pos:
if input_size is None:
raise ValueError('Input size must be provided if using relative positional encoding.')
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
"""
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int):
size of the query.
k_size (int):
size of key k.
rel_pos (`torch.Tensor`):
relative position embeddings (L, channel).
Returns:
Extracted positional embeddings according to relative positions.
"""
max_rel_dist = int(2 * max(q_size, k_size) - 1)
rel_pos_resized = F.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode='linear')
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
def get_decomposed_rel_pos(self, query: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: tuple[int, int], k_size: tuple[int, int]) -> torch.Tensor:
"""
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py
Args:
query (`torch.Tensor`):
query q in the attention layer with shape (batch_size, query_height * query_width, channel).
rel_pos_h (`torch.Tensor`):
relative position embeddings (Lh, channel) for height axis.
rel_pos_w (`torch.Tensor`):
relative position embeddings (Lw, channel) for width axis.
q_size (tuple):
spatial sequence size of query q with (query_height, query_width).
k_size (tuple):
spatial sequence size of key k with (key_height, key_width).
Returns:
decomposed_rel_pos (`torch.Tensor`):
decomposed relative position embeddings.
"""
query_height, query_width = q_size
key_height, key_width = k_size
relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h)
relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w)
batch_size, _, dim = query.shape
reshaped_query = query.reshape(batch_size, query_height, query_width, dim)
rel_h = torch.einsum('bhwc,hkc->bhwk', reshaped_query, relative_position_height)
rel_w = torch.einsum('bhwc,wkc->bhwk', reshaped_query, relative_position_width)
decomposed_rel_pos = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
return decomposed_rel_pos
def forward(self, hidden_states: torch.Tensor, output_attentions=None) -> tuple[torch.Tensor, torch.Tensor]:
batch_size, height, width, _ = hidden_states.shape
qkv = self.qkv(hidden_states).reshape(batch_size, height * width, 3, self.num_attention_heads, -1).permute(2, 0, 3, 1, 4)
query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0)
attn_weights = query * self.scale @ key.transpose(-2, -1)
if self.use_rel_pos:
decomposed_rel_pos = self.get_decomposed_rel_pos(query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width))
decomposed_rel_pos = decomposed_rel_pos.reshape_as(attn_weights)
attn_weights = attn_weights + decomposed_rel_pos
attn_weights = torch.nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query.dtype)
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = (attn_probs @ value).reshape(batch_size, self.num_attention_heads, height, width, -1)
attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1)
attn_output = self.proj(attn_output)
return (attn_output, attn_weights)
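A standalone sketch of the coordinate arithmetic inside `get_rel_pos` above for the common q_size == k_size case: relative offsets q - k are shifted by k_size - 1 so they index into the (2 * k_size - 1)-row embedding tables `rel_pos_h` / `rel_pos_w`:
```python
import torch

q_size = k_size = 4
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)
print(relative_coords.long())
# tensor([[3, 2, 1, 0],
#         [4, 3, 2, 1],
#         [5, 4, 3, 2],
#         [6, 5, 4, 3]])  -> valid indices into a (2*4 - 1)-row table
```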
|
class GotOcr2VisionAttention(nn.Module):
'''Multi-head Attention block with relative position embeddings.'''
def __init__(self, config, window_size):
pass
def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
'''
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int):
size of the query.
k_size (int):
size of key k.
rel_pos (`torch.Tensor`):
relative position embeddings (L, channel).
Returns:
Extracted positional embeddings according to relative positions.
'''
pass
def get_decomposed_rel_pos(self, query: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: tuple[int, int], k_size: tuple[int, int]) -> torch.Tensor:
'''
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py
Args:
query (`torch.Tensor`):
query q in the attention layer with shape (batch_size, query_height * query_width, channel).
rel_pos_h (`torch.Tensor`):
relative position embeddings (Lh, channel) for height axis.
rel_pos_w (`torch.Tensor`):
relative position embeddings (Lw, channel) for width axis.
q_size (tuple):
spatial sequence size of query q with (query_height, query_width).
k_size (tuple):
spatial sequence size of key k with (key_height, key_width).
Returns:
decomposed_rel_pos (`torch.Tensor`):
decomposed relative position embeddings.
'''
pass
def forward(self, hidden_states: torch.Tensor, output_attentions=None) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 5 | 3 | 33 | 5 | 19 | 10 | 2 | 0.51 | 1 | 4 | 0 | 0 | 4 | 8 | 4 | 14 | 138 | 23 | 76 | 43 | 63 | 39 | 53 | 35 | 48 | 4 | 1 | 2 | 9 |
2,761 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modeling_got_ocr2.py | transformers.models.got_ocr2.modeling_got_ocr2.GotOcr2VisionEncoder |
from .configuration_got_ocr2 import GotOcr2Config, GotOcr2VisionConfig
import torch.nn as nn
import torch.nn.functional as F
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from typing import Optional, Union
from ...processing_utils import Unpack
from transformers.utils.generic import check_model_inputs
import torch
class GotOcr2VisionEncoder(GotOcr2PreTrainedModel):
_can_record_outputs = {'hidden_states': GotOcr2VisionLayer, 'attentions': GotOcr2VisionAttention}
def __init__(self, config: GotOcr2VisionConfig):
super().__init__(config)
self.config = config
self.image_size = config.image_size
self.patch_embed = GotOcr2PatchEmbeddings(config)
self.pos_embed = None
if config.use_abs_pos:
self.pos_embed = nn.Parameter(torch.zeros(1, config.image_size // config.patch_size, config.image_size // config.patch_size, config.hidden_size))
self.layers = nn.ModuleList()
for i in range(config.num_hidden_layers):
layer = GotOcr2VisionLayer(config, window_size=config.window_size if i not in config.global_attn_indexes else 0)
self.layers.append(layer)
self.neck = GotOcr2VisionNeck(config)
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.patch_embed
@check_model_inputs
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> GotOcr2VisionEncoderOutput:
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
hidden_states = self.patch_embed(pixel_values)
if self.pos_embed is not None:
hidden_states = hidden_states + self.pos_embed
for layer_module in self.layers:
hidden_states = layer_module(hidden_states)
hidden_states = self.neck(hidden_states)
return GotOcr2VisionEncoderOutput(last_hidden_state=hidden_states)
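The layer schedule built in `__init__` above, spelled out for the SAM ViT-B-style defaults (12 layers, window_size=14, global_attn_indexes=[2, 5, 8, 11]): indices in `global_attn_indexes` get global attention (window_size=0), all others attend within 14x14 windows:
```python
num_hidden_layers, window_size, global_attn_indexes = 12, 14, [2, 5, 8, 11]
schedule = [0 if i in global_attn_indexes else window_size for i in range(num_hidden_layers)]
print(schedule)  # [14, 14, 0, 14, 14, 0, 14, 14, 0, 14, 14, 0]
```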
|
class GotOcr2VisionEncoder(GotOcr2PreTrainedModel):
def __init__(self, config: GotOcr2VisionConfig):
pass
def get_input_embeddings(self):
pass
@check_model_inputs
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> GotOcr2VisionEncoderOutput:
pass
| 5 | 0 | 30 | 5 | 24 | 0 | 7 | 0.01 | 1 | 10 | 5 | 0 | 3 | 7 | 3 | 13 | 93 | 18 | 74 | 25 | 64 | 1 | 48 | 19 | 44 | 16 | 1 | 2 | 21 |
2,762 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modeling_got_ocr2.py | transformers.models.got_ocr2.modeling_got_ocr2.GotOcr2VisionLayer |
import torch.nn as nn
from ...modeling_layers import GradientCheckpointingLayer
import torch
import torch.nn.functional as F
class GotOcr2VisionLayer(GradientCheckpointingLayer):
def __init__(self, config, window_size):
super().__init__()
self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attn = GotOcr2VisionAttention(config, window_size)
self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = GotOcr2MLPBlock(config)
self.window_size = window_size
def window_partition(self, hidden_states: torch.Tensor, window_size: int) -> tuple[torch.Tensor, tuple[int, int]]:
"""
Args:
Partition into non-overlapping windows with padding if needed.
hidden_states (tensor): input tokens with [batch_size, height, width, channel]. window_size (int): window
size.
Returns:
windows: windows after partition with [batch_size * num_windows, window_size, window_size, channel].
(pad_height, pad_width): padded height and width before partition
"""
batch_size, height, width, channel = hidden_states.shape
pad_h = (window_size - height % window_size) % window_size
pad_w = (window_size - width % window_size) % window_size
hidden_states = F.pad(hidden_states, (0, 0, 0, pad_w, 0, pad_h))
pad_height, pad_width = (height + pad_h, width + pad_w)
hidden_states = hidden_states.reshape(batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel)
windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(-1, window_size, window_size, channel)
return (windows, (pad_height, pad_width))
def window_unpartition(self, windows: torch.Tensor, window_size: int, padding_shape: tuple[int, int], original_shape: tuple[int, int]) -> torch.Tensor:
"""
Args:
Window unpartition into original sequences and removing padding.
hidden_states (tensor):
input tokens with [batch_size * num_windows, window_size, window_size, channel].
window_size (int):
window size.
padding_shape (Tuple):
padded height and width (pad_height, pad_width).
original_shape (Tuple): original height and width (height, width) before padding.
Returns:
hidden_states: unpartitioned sequences with [batch_size, height, width, channel].
"""
pad_height, pad_width = padding_shape
height, width = original_shape
batch_size = windows.shape[0] // (pad_height * pad_width // window_size // window_size)
hidden_states = windows.reshape(batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1)
hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(batch_size, pad_height, pad_width, -1)
hidden_states = hidden_states[:, :height, :width, :].contiguous()
return hidden_states
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.FloatTensor]:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
if self.window_size > 0:
height, width = (hidden_states.shape[1], hidden_states.shape[2])
hidden_states, padding_shape = self.window_partition(hidden_states, self.window_size)
hidden_states, attn_weights = self.attn(hidden_states=hidden_states)
if self.window_size > 0:
hidden_states = self.window_unpartition(hidden_states, self.window_size, padding_shape, (height, width))
hidden_states = residual + hidden_states
layernorm_output = self.layer_norm2(hidden_states)
hidden_states = hidden_states + self.mlp(layernorm_output)
return hidden_states
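A round-trip check for the two window helpers above, copied as free functions (the methods never touch `self`, so the logic is verbatim): a 5x7 map is zero-padded to 6x9, split into 3x3 windows, and unpartitioning recovers the original exactly:
```python
import torch
import torch.nn.functional as F

def window_partition(hidden_states, window_size):
    batch_size, height, width, channel = hidden_states.shape
    pad_h = (window_size - height % window_size) % window_size
    pad_w = (window_size - width % window_size) % window_size
    hidden_states = F.pad(hidden_states, (0, 0, 0, pad_w, 0, pad_h))
    pad_height, pad_width = height + pad_h, width + pad_w
    hidden_states = hidden_states.reshape(
        batch_size, pad_height // window_size, window_size, pad_width // window_size, window_size, channel
    )
    windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(-1, window_size, window_size, channel)
    return windows, (pad_height, pad_width)

def window_unpartition(windows, window_size, padding_shape, original_shape):
    pad_height, pad_width = padding_shape
    height, width = original_shape
    batch_size = windows.shape[0] // (pad_height * pad_width // window_size // window_size)
    hidden_states = windows.reshape(
        batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1
    )
    hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(batch_size, pad_height, pad_width, -1)
    return hidden_states[:, :height, :width, :].contiguous()

x = torch.randn(2, 5, 7, 4)                      # (batch, height, width, channel)
windows, padding_shape = window_partition(x, 3)
print(windows.shape)                             # torch.Size([12, 3, 3, 4]): 2 * (2x3) windows
print(torch.equal(window_unpartition(windows, 3, padding_shape, (5, 7)), x))  # True
```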
|
class GotOcr2VisionLayer(GradientCheckpointingLayer):
def __init__(self, config, window_size):
pass
def window_partition(self, hidden_states: torch.Tensor, window_size: int) -> tuple[torch.Tensor, tuple[int, int]]:
'''
Partition into non-overlapping windows with padding if needed.
Args:
hidden_states (tensor): input tokens with [batch_size, height, width, channel].
window_size (int): window size.
Returns:
windows: windows after partition with [batch_size * num_windows, window_size, window_size, channel].
(pad_height, pad_width): padded height and width before partition.
'''
pass
def window_unpartition(self, windows: torch.Tensor, window_size: int, padding_shape: tuple[int, int], original_shape: tuple[int, int]) -> torch.Tensor:
'''
Window unpartition into original sequences, removing padding.
Args:
windows (tensor):
input tokens with [batch_size * num_windows, window_size, window_size, channel].
window_size (int):
window size.
padding_shape (Tuple):
padded height and width (pad_height, pad_width).
original_shape (Tuple):
original height and width (height, width) before padding.
Returns:
hidden_states: unpartitioned sequences with [batch_size, height, width, channel].
'''
pass
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.FloatTensor]:
pass
| 5 | 2 | 22 | 3 | 14 | 6 | 2 | 0.43 | 1 | 6 | 2 | 0 | 4 | 5 | 4 | 14 | 93 | 13 | 56 | 31 | 45 | 24 | 41 | 25 | 36 | 4 | 1 | 1 | 7 |
2,763 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modeling_got_ocr2.py | transformers.models.got_ocr2.modeling_got_ocr2.GotOcr2VisionNeck |
from .configuration_got_ocr2 import GotOcr2Config, GotOcr2VisionConfig
import torch.nn as nn
class GotOcr2VisionNeck(nn.Module):
def __init__(self, config: GotOcr2VisionConfig):
super().__init__()
self.config = config
self.conv1 = nn.Conv2d(config.hidden_size, config.output_channels, kernel_size=1, bias=False)
self.layer_norm1 = GotOcr2LayerNorm(config.output_channels, data_format='channels_first')
self.conv2 = nn.Conv2d(config.output_channels, config.output_channels, kernel_size=3, padding=1, bias=False)
self.layer_norm2 = GotOcr2LayerNorm(config.output_channels, data_format='channels_first')
def forward(self, hidden_states):
hidden_states = hidden_states.permute(0, 3, 1, 2)
hidden_states = self.conv1(hidden_states)
hidden_states = self.layer_norm1(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = self.layer_norm2(hidden_states)
return hidden_states
|
class GotOcr2VisionNeck(nn.Module):
def __init__(self, config: GotOcr2VisionConfig):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 8 | 1 | 7 | 0 | 1 | 0 | 1 | 3 | 2 | 0 | 2 | 5 | 2 | 12 | 18 | 3 | 15 | 8 | 12 | 0 | 15 | 8 | 12 | 1 | 1 | 0 | 2 |
2,764 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modular_got_ocr2.py | transformers.models.got_ocr2.modular_got_ocr2.GotOcr2CausalLMOutputWithPast |
from ..llava.modeling_llava import LlavaCausalLMOutputWithPast, LlavaForConditionalGeneration, LlavaModel, LlavaModelOutputWithPast, LlavaPreTrainedModel, TransformersKwargs
class GotOcr2CausalLMOutputWithPast(LlavaCausalLMOutputWithPast):
pass
|
class GotOcr2CausalLMOutputWithPast(LlavaCausalLMOutputWithPast):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
2,765 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modular_got_ocr2.py | transformers.models.got_ocr2.modular_got_ocr2.GotOcr2Config |
from ..auto import CONFIG_MAPPING, AutoConfig
from ...configuration_utils import PretrainedConfig
class GotOcr2Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`GotOcr2ForConditionalGeneration`]. It is used to instantiate a
GotOcr2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of GOT-OCR-2.0.
e.g. [stepfun-ai/GOT-OCR-2.0-hf](https://huggingface.co/stepfun-ai/GOT-OCR-2.0-hf)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `GotOcr2VisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`):
The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 151859):
The image token index to encode the image prompt.
image_seq_length (`int`, *optional*, defaults to 576):
Sequence length of one image embedding.
pad_token_id (`int`, *optional*, defaults to -1):
Padding token id.
```python
>>> from transformers import GotOcr2ForConditionalGeneration, GotOcr2Config
>>> # Initializing a GotOcr2 style configuration
>>> configuration = GotOcr2Config()
>>> # Initializing a model from the GOT-OCR-2.0 style configuration
>>> model = GotOcr2ForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'got_ocr2'
attribute_map = {'image_token_id': 'image_token_index'}
sub_configs = {'text_config': AutoConfig, 'vision_config': GotOcr2VisionConfig}
def __init__(self, vision_config=None, text_config=None, image_token_index=151859, image_seq_length=576, pad_token_id=-1, **kwargs):
self.image_token_index = image_token_index
self.image_seq_length = image_seq_length
self.pad_token_id = pad_token_id
if vision_config is None:
self.vision_config = GotOcr2VisionConfig()
elif isinstance(vision_config, dict):
self.vision_config = GotOcr2VisionConfig(**vision_config)
elif isinstance(vision_config, GotOcr2VisionConfig):
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config['model_type'] = text_config.get('model_type', 'qwen2')
text_config = CONFIG_MAPPING[text_config['model_type']](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING['qwen2'](vocab_size=151860, hidden_size=1024, intermediate_size=2816, num_hidden_layers=24, num_attention_heads=16, num_key_value_heads=16, hidden_act='silu', max_position_embeddings=32768, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, tie_word_embeddings=True, rope_theta=1000000.0, rope_scaling=None, use_sliding_window=False, sliding_window=4096, max_window_layers=21, attention_dropout=0.0)
self.text_config = text_config
super().__init__(**kwargs)
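A hedged usage sketch for the config above (classes assumed in scope as defined here): a `text_config` dict is routed through `CONFIG_MAPPING` via its `model_type` key, defaulting to `qwen2`, while a `vision_config` dict becomes a `GotOcr2VisionConfig`:
```python
config = GotOcr2Config(
    vision_config={"hidden_size": 768, "num_hidden_layers": 12},
    text_config={"hidden_size": 1024, "num_hidden_layers": 24},  # model_type defaults to "qwen2"
)
print(type(config.vision_config).__name__)  # GotOcr2VisionConfig
print(config.text_config.model_type)        # qwen2
print(config.image_token_index)             # 151859
```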
|
class GotOcr2Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`GotOcr2ForConditionalGeneration`]. It is used to instantiate a
GotOcr2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of GOT-OCR-2.0.
e.g. [stepfun-ai/GOT-OCR-2.0-hf](https://huggingface.co/stepfun-ai/GOT-OCR-2.0-hf)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `GotOcr2VisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`):
The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 151859):
The image token index to encode the image prompt.
image_seq_length (`int`, *optional*, defaults to 576):
Sequence length of one image embedding.
pad_token_id (`int`, *optional*, defaults to -1):
Padding token id.
```python
>>> from transformers import GotOcr2ForConditionalGeneration, GotOcr2Config
>>> # Initializing a GotOcr2 style configuration
>>> configuration = GotOcr2Config()
>>> # Initializing a model from the Qwen2-VL-7B style configuration
>>> model = GotOcr2ForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vision_config=None, text_config=None, image_token_index=151859, image_seq_length=576, pad_token_id=-1, **kwargs):
pass
| 2 | 1 | 50 | 4 | 46 | 0 | 7 | 0.59 | 1 | 3 | 1 | 0 | 1 | 6 | 1 | 33 | 92 | 14 | 49 | 19 | 38 | 29 | 18 | 10 | 16 | 7 | 2 | 1 | 7 |
2,766 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modular_got_ocr2.py | transformers.models.got_ocr2.modular_got_ocr2.GotOcr2ForConditionalGeneration |
from ..llava.modeling_llava import LlavaCausalLMOutputWithPast, LlavaForConditionalGeneration, LlavaModel, LlavaModelOutputWithPast, LlavaPreTrainedModel, TransformersKwargs
from ...utils import auto_docstring, can_return_tuple, logging
from ...cache_utils import Cache
from typing import Optional, Union
from ...processing_utils import Unpack
import torch.nn as nn
import torch
class GotOcr2ForConditionalGeneration(LlavaForConditionalGeneration):
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, GotOcr2CausalLMOutputWithPast]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, GotOcr2ForConditionalGeneration, TextStreamer
>>> model = GotOcr2ForConditionalGeneration.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf").to("cuda")
>>> processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf")
>>> url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/multi_box.png"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(image, return_tensors="pt", color="green").to("cuda")
>>> # Generate
>>> streamer = TextStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
>>> generate_ids = model.generate(
... **inputs,
... do_sample=False,
... tokenizer = processor.tokenizer,
... stop_strings='<|im_end|>',
... streamer=streamer,
... max_new_tokens=4096,
... )
"You should keep in mind what features from the module should be used, especially
when you're planning to sell a template."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs)
hidden_states = outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)
return GotOcr2CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states)
|
class GotOcr2ForConditionalGeneration(LlavaForConditionalGeneration):
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, GotOcr2CausalLMOutputWithPast]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, GotOcr2ForConditionalGeneration, TextStreamer
>>> model = GotOcr2ForConditionalGeneration.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf").to("cuda")
>>> processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf")
>>> url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_got_ocr/resolve/main/multi_box.png"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(image, return_tensors="pt", color="green").to("cuda")
>>> # Generate
>>> streamer = TextStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
>>> generate_ids = model.generate(
... **inputs,
... do_sample=False,
... tokenizer = processor.tokenizer,
... stop_strings='<|im_end|>',
... streamer=streamer,
... max_new_tokens=4096,
... )
"You should keep in mind what features from the module should be used, especially
when you're planning to sell a template."
```'''
pass
| 4 | 1 | 55 | 8 | 31 | 16 | 6 | 0.48 | 1 | 11 | 5 | 0 | 3 | 6 | 3 | 17 | 170 | 26 | 97 | 42 | 73 | 47 | 50 | 23 | 46 | 14 | 3 | 2 | 17 |
2,767 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modular_got_ocr2.py | transformers.models.got_ocr2.modular_got_ocr2.GotOcr2MLPBlock |
from ..sam.modeling_sam import SamMLPBlock, SamPreTrainedModel, SamVisionAttention, SamVisionEncoder, SamVisionLayer
class GotOcr2MLPBlock(SamMLPBlock):
pass
|
class GotOcr2MLPBlock(SamMLPBlock):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
2,768 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modular_got_ocr2.py | transformers.models.got_ocr2.modular_got_ocr2.GotOcr2MultiModalProjector |
import torch.nn as nn
import torch
class GotOcr2MultiModalProjector(nn.Module):
def __init__(self, config: GotOcr2Config):
super().__init__()
vision_output_channels = config.vision_config.output_channels
language_hidden_size = config.text_config.hidden_size
self.conv_upsampler1 = nn.Conv2d(vision_output_channels, vision_output_channels * 2, kernel_size=3, stride=2, padding=1, bias=False)
self.conv_upsampler2 = nn.Conv2d(vision_output_channels * 2, language_hidden_size, kernel_size=3, stride=2, padding=1, bias=False)
self.multimodal_projector = nn.Linear(language_hidden_size, language_hidden_size)
def forward(self, vision_embeddings: torch.Tensor) -> torch.Tensor:
hidden_state = self.conv_upsampler1(vision_embeddings)
hidden_state = self.conv_upsampler2(hidden_state)
hidden_state = hidden_state.flatten(2).permute(0, 2, 1)
hidden_state = self.multimodal_projector(hidden_state)
return hidden_state
|
class GotOcr2MultiModalProjector(nn.Module):
def __init__(self, config: GotOcr2Config):
pass
def forward(self, vision_embeddings: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 9 | 0 | 9 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 2 | 3 | 2 | 12 | 19 | 1 | 18 | 9 | 15 | 0 | 14 | 9 | 11 | 1 | 1 | 0 | 2 |
2,769 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modular_got_ocr2.py | transformers.models.got_ocr2.modular_got_ocr2.GotOcr2PreTrainedModel |
from ..sam.modeling_sam import SamMLPBlock, SamPreTrainedModel, SamVisionAttention, SamVisionEncoder, SamVisionLayer
class GotOcr2PreTrainedModel(SamPreTrainedModel):
pass
|
class GotOcr2PreTrainedModel(SamPreTrainedModel):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
2,770 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modular_got_ocr2.py | transformers.models.got_ocr2.modular_got_ocr2.GotOcr2VisionAttention |
from ..sam.modeling_sam import SamMLPBlock, SamPreTrainedModel, SamVisionAttention, SamVisionEncoder, SamVisionLayer
class GotOcr2VisionAttention(SamVisionAttention):
pass
|
class GotOcr2VisionAttention(SamVisionAttention):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
2,771 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modular_got_ocr2.py | transformers.models.got_ocr2.modular_got_ocr2.GotOcr2VisionConfig |
from ...configuration_utils import PretrainedConfig
class GotOcr2VisionConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`GotOcr2VisionModel`]. It is used to instantiate a GOT_OCR2
vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the SAM ViT-h
[facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
output_channels (`int`, *optional*, defaults to 256):
Dimensionality of the output channels in the Patch Encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input image.
image_size (`int`, *optional*, defaults to 1024):
Expected resolution. Target size of the resized input image.
patch_size (`int`, *optional*, defaults to 16):
Size of the patches to be extracted from the input image.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 1e-10):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to query, key, value projections.
use_abs_pos (`bool`, *optional*, defaults to `True`):
Whether to use absolute position embedding.
use_rel_pos (`bool`, *optional*, defaults to `True`):
Whether to use relative position embedding.
window_size (`int`, *optional*, defaults to 14):
Window size for relative position.
global_attn_indexes (`list[int]`, *optional*, defaults to `[2, 5, 8, 11]`):
The indexes of the global attention layers.
mlp_dim (`int`, *optional*, defaults to 3072):
The dimensionality of the MLP layer in the Transformer encoder.
"""
base_config_key = 'vision_config'
def __init__(self, hidden_size=768, output_channels=256, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=1024, patch_size=16, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, use_abs_pos=True, use_rel_pos=True, window_size=14, global_attn_indexes=[2, 5, 8, 11], mlp_dim=3072, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.output_channels = output_channels
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.qkv_bias = qkv_bias
self.use_abs_pos = use_abs_pos
self.use_rel_pos = use_rel_pos
self.window_size = window_size
self.global_attn_indexes = global_attn_indexes
self.mlp_dim = mlp_dim
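Two small points implied by this config, sketched with its defaults (see `GotOcr2VisionAttention` earlier in this dump for where the tables are allocated): windowed layers index a (2 * window_size - 1)-row relative-position table, while global layers (window_size == 0) index the full (2 * grid - 1)-row one. Note also that `global_attn_indexes=[2, 5, 8, 11]` is a mutable default list shared across instances, a standard Python caveat:
```python
image_size, patch_size, window_size = 1024, 16, 14
grid = image_size // patch_size  # 64 patches per side
print(2 * window_size - 1)       # 27 rel-pos rows for windowed layers
print(2 * grid - 1)              # 127 rel-pos rows for global layers
```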
|
class GotOcr2VisionConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`GotOcr2VisionModel`]. It is used to instantiate a GOT_OCR2
vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the SAM ViT-h
[facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
output_channels (`int`, *optional*, defaults to 256):
Dimensionality of the output channels in the Patch Encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input image.
image_size (`int`, *optional*, defaults to 1024):
Expected resolution. Target size of the resized input image.
patch_size (`int`, *optional*, defaults to 16):
Size of the patches to be extracted from the input image.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 1e-10):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to query, key, value projections.
use_abs_pos (`bool`, *optional*, defaults to `True`):
Whether to use absolute position embedding.
use_rel_pos (`bool`, *optional*, defaults to `True`):
Whether to use relative position embedding.
window_size (`int`, *optional*, defaults to 14):
Window size for relative position.
global_attn_indexes (`list[int]`, *optional*, defaults to `[2, 5, 8, 11]`):
The indexes of the global attention layers.
mlp_dim (`int`, *optional*, defaults to 3072):
The dimensionality of the MLP layer in the Transformer encoder.
'''
def __init__(self, hidden_size=768, output_channels=256, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=1024, patch_size=16, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, use_abs_pos=True, use_rel_pos=True, window_size=14, global_attn_indexes=[2, 5, 8, 11], mlp_dim=3072, **kwargs):
pass
| 2 | 1 | 40 | 1 | 39 | 0 | 1 | 1.05 | 1 | 1 | 0 | 0 | 1 | 17 | 1 | 33 | 89 | 5 | 41 | 40 | 19 | 43 | 21 | 20 | 19 | 1 | 2 | 0 | 1 |
2,772 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modular_got_ocr2.py | transformers.models.got_ocr2.modular_got_ocr2.GotOcr2VisionEncoder |
from ..sam.modeling_sam import SamMLPBlock, SamPreTrainedModel, SamVisionAttention, SamVisionEncoder, SamVisionLayer
class GotOcr2VisionEncoder(SamVisionEncoder, GotOcr2PreTrainedModel):
pass
|
class GotOcr2VisionEncoder(SamVisionEncoder, GotOcr2PreTrainedModel):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
2,773 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/modular_got_ocr2.py | transformers.models.got_ocr2.modular_got_ocr2.GotOcr2VisionLayer |
from ..sam.modeling_sam import SamMLPBlock, SamPreTrainedModel, SamVisionAttention, SamVisionEncoder, SamVisionLayer
import torch.nn as nn
class GotOcr2VisionLayer(SamVisionLayer):
def __init__(self, config, window_size):
super().__init__(config, window_size)
self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attn = GotOcr2VisionAttention(config, window_size)
self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = GotOcr2MLPBlock(config)
self.window_size = window_size
|
class GotOcr2VisionLayer(SamVisionLayer):
def __init__(self, config, window_size):
pass
| 2 | 0 | 7 | 0 | 7 | 0 | 1 | 0 | 1 | 3 | 2 | 0 | 1 | 5 | 1 | 15 | 8 | 0 | 8 | 7 | 6 | 0 | 8 | 7 | 6 | 1 | 2 | 0 | 1
|
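The `__init__` above only swaps in GOT-OCR2 submodules; the forward pass is inherited from `SamVisionLayer`. As a rough schematic of the inherited pre-norm residual layout (the real forward additionally partitions and unpartitions windows when `window_size > 0`, which is omitted here):

```python
import torch

# Schematic only: not the actual SamVisionLayer.forward, which also handles
# window partitioning. Assumes layer.attn returns a tuple whose first element
# is the attention output.
def vision_layer_sketch(layer, hidden_states: torch.Tensor) -> torch.Tensor:
    residual = hidden_states
    hidden_states = layer.attn(layer.layer_norm1(hidden_states))[0]
    hidden_states = residual + hidden_states
    return hidden_states + layer.mlp(layer.layer_norm2(hidden_states))
```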
2,774
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/processing_got_ocr2.py
|
transformers.models.got_ocr2.processing_got_ocr2.GotOcr2ImagesKwargs
|
from transformers.processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from typing import Optional, Union
class GotOcr2ImagesKwargs(ImagesKwargs, total=False):
box: Optional[Union[list, tuple[float, float], tuple[float, float, float, float]]]
color: Optional[str]
num_image_tokens: Optional[int]
multi_page: Optional[bool]
crop_to_patches: Optional[bool]
min_patches: Optional[int]
max_patches: Optional[int]
|
class GotOcr2ImagesKwargs(ImagesKwargs, total=False):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0 | 8 | 1 | 7 | 0 | 8 | 1 | 7 | 0 | 2 | 0 | 0
|
2,775
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/processing_got_ocr2.py
|
transformers.models.got_ocr2.processing_got_ocr2.GotOcr2Processor
|
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
import numpy as np
from ...image_utils import ImageInput
from typing import Optional, Union
from ...image_processing_utils import BatchFeature
from transformers.processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
class GotOcr2Processor(ProcessorMixin):
"""
Constructs a GotOcr2 processor which wraps a [`GotOcr2ImageProcessor`] and
[`PretrainedTokenizerFast`] tokenizer into a single processor that inherits both the image processor and
tokenizer functionalities. See the [`~GotOcr2Processor.__call__`] and [`~GotOcr2Processor.decode`] for more information.
Args:
image_processor ([`GotOcr2ImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`], *optional*):
The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'AutoImageProcessor'
tokenizer_class = 'PreTrainedTokenizerFast'
def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs):
super().__init__(image_processor, tokenizer, chat_template=chat_template)
self.message_start_token = '<|im_start|>'
self.message_end_token = '<|im_end|>'
self.img_start_token = '<img>'
self.img_end_token = '</img>'
self.img_pad_token = '<imgpad>'
self.image_token = '<imgpad>'
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
self.system_query = 'system\nYou should follow the instructions carefully and explain your answers in detail.'
def _make_list_of_inputs(self, images, text, box, color, multi_page):
if not isinstance(images, (list, tuple)):
images = [images]
if multi_page:
logger.warning('Multi-page inference is enabled but only one image is passed.')
images = [images]
elif isinstance(images[0], (list, tuple)) and (not multi_page):
raise ValueError('Nested images are only supported with `multi_page` set to `True`.')
elif not isinstance(images[0], (list, tuple)) and multi_page:
images = [images]
if isinstance(text, str):
text = [text]
if not isinstance(box[0], (list, tuple)):
box = [box for _ in range(len(images))]
if not isinstance(color, (list, tuple)):
color = [color for _ in range(len(images))]
return (images, text, box, color)
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[GotOcr2ProcessorKwargs]) -> BatchFeature:
"""
Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] to encode the text if `text`
is not `None`, otherwise encodes default OCR queries, which depend on the `format`, `box`, `color`, `multi_page` and
`crop_to_patches` arguments. To prepare the vision inputs, this method forwards the `images` and `kwargs` arguments to
GotOcr2ImageProcessor's [`~GotOcr2ImageProcessor.__call__`] if `images` is not `None`.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
format (`bool`, *optional*):
If set, will add the format token to the query, and the model will return the OCR result with formatting.
box (`list[float]`, `list[tuple[float, float]]`, `list[tuple[float, float, float, float]]`, *optional*):
The box annotation to be added to the query. If a list of floats or a tuple of floats is provided, it
will be interpreted as [x1, y1, x2, y2]. If a list of tuples is provided, each tuple should be in the
form (x1, y1, x2, y2).
color (`str`, *optional*):
The color annotation to be added to the query. The model will return the OCR result within the box with
the specified color.
multi_page (`bool`, *optional*):
If set, will enable multi-page inference. The model will return the OCR result across multiple pages.
crop_to_patches (`bool`, *optional*):
If set, will crop the image to patches. The model will return the OCR result upon the patch reference.
min_patches (`int`, *optional*):
The minimum number of patches to be cropped from the image. Only used when `crop_to_patches` is set to
`True`.
max_patches (`int`, *optional*):
The maximum number of patches to be cropped from the image. Only used when `crop_to_patches` is set to
`True`.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(GotOcr2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
format_output = output_kwargs['text_kwargs'].pop('format')
num_image_tokens = output_kwargs['images_kwargs'].pop('num_image_tokens')
box = output_kwargs['images_kwargs'].pop('box', [None])
color = output_kwargs['images_kwargs'].pop('color', None)
multi_page = output_kwargs['images_kwargs'].pop('multi_page')
crop_to_patches = output_kwargs['images_kwargs'].get('crop_to_patches')
images, text, box, color = self._make_list_of_inputs(images, text, box, color, multi_page)
if multi_page:
num_pages_per_batch = [len(image_group) for image_group in images]
images = [image for image_group in images for image in image_group]
else:
num_pages_per_batch = [1 for _ in range(len(images))]
images = load_images(images)
image_sizes = [image.size for image in images]
image_inputs = self.image_processor(images=images, **output_kwargs['images_kwargs'])
num_patches_array = image_inputs.pop('num_patches')
if text is None:
text = []
patch_indices = np.cumsum(num_pages_per_batch)
for index, (num_pages, box_single, color_single) in enumerate(zip(num_pages_per_batch, box, color)):
current_patch_index = patch_indices[index - 1] if index > 0 else 0
num_patches = sum(num_patches_array[current_patch_index:current_patch_index + num_pages])
if box_single[0] is not None:
box_single = preprocess_box_annotation(box_single, image_sizes[index])
query = f"{(f'[{color_single}] ' if color_single is not None else '')}{(str(box_single) if box_single[0] is not None else '')} OCR{(' with format' if format_output else '')}{(' across multi pages' if multi_page else '')}{(' upon the patch reference' if crop_to_patches else '')}: "
prompt = self.message_start_token + self.system_query + self.message_end_token + self.message_start_token + 'user\n' + self.img_start_token + self.img_pad_token * num_image_tokens * num_patches + self.img_end_token + '\n' + query + self.message_end_token + self.message_start_token + 'assistant\n'
text.append(prompt)
return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)
text_inputs = self.tokenizer(text, **output_kwargs['text_kwargs'])
self._check_special_mm_tokens(text, text_inputs, modalities=['image'])
return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
|
class GotOcr2Processor(ProcessorMixin):
'''
Constructs a GotOcr2 processor which wraps a [`GotOcr2ImageProcessor`] and
[`PretrainedTokenizerFast`] tokenizer into a single processor that inherits both the image processor and
tokenizer functionalities. See the [`~GotOcr2Processor.__call__`] and [`~GotOcr2Processor.decode`] for more information.
Args:
image_processor ([`GotOcr2ImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`], *optional*):
The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
'''
def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs):
pass
def _make_list_of_inputs(self, images, text, box, color, multi_page):
pass
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[GotOcr2ProcessorKwargs]) -> BatchFeature:
'''
Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] to encode the text if `text`
is not `None`, otherwise encodes default OCR queries, which depend on the `format`, `box`, `color`, `multi_page` and
`crop_to_patches` arguments. To prepare the vision inputs, this method forwards the `images` and `kwargs` arguments to
GotOcr2ImageProcessor's [`~GotOcr2ImageProcessor.__call__`] if `images` is not `None`.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
format (`bool`, *optional*):
If set, will add the format token to the query, and the model will return the OCR result with formatting.
box (`list[float]`, `list[tuple[float, float]]`, `list[tuple[float, float, float, float]]`, *optional*):
The box annotation to be added to the query. If a list of floats or a tuple of floats is provided, it
will be interpreted as [x1, y1, x2, y2]. If a list of tuples is provided, each tuple should be in the
form (x1, y1, x2, y2).
color (`str`, *optional*):
The color annotation to be added to the query. The model will return the OCR result within the box with
the specified color.
multi_page (`bool`, *optional*):
If set, will enable multi-page inference. The model will return the OCR result across multiple pages.
crop_to_patches (`bool`, *optional*):
If set, will crop the image to patches. The model will return the OCR result upon the patch reference.
min_patches (`int`, *optional*):
The minimum number of patches to be cropped from the image. Only used when `crop_to_patches` is set to
`True`.
max_patches (`int`, *optional*):
The maximum number of patches to be cropped from the image. Only used when `crop_to_patches` is set to
`True`.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
'''
pass
| 4 | 2 | 30 | 2 | 18 | 10 | 4 | 0.61 | 1 | 10 | 2 | 0 | 6 | 6 | 6 | 23 | 205 | 20 | 115 | 43 | 100 | 70 | 68 | 34 | 61 | 14 | 2 | 3 | 26
|
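A hedged usage sketch for the processor record above. The checkpoint id `stepfun-ai/GOT-OCR-2.0-hf` and the file name `page.png` are assumptions for illustration; the kwargs (`format`, `box`, `return_tensors`) are the ones defined in the records above:

```python
from PIL import Image
from transformers import AutoProcessor

# Assumed checkpoint id; substitute the GOT-OCR2 checkpoint you actually use.
processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf")
image = Image.open("page.png")  # placeholder file name

# text=None: the processor builds the default OCR prompt itself, including the
# <img> ... <imgpad> * num_image_tokens * num_patches ... </img> placeholder span.
inputs = processor(images=image, return_tensors="pt")

# Formatted OCR restricted to a box annotation (interpreted as [x1, y1, x2, y2]).
inputs = processor(images=image, format=True, box=[100, 100, 400, 300], return_tensors="pt")
```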
2,776
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/processing_got_ocr2.py
|
transformers.models.got_ocr2.processing_got_ocr2.GotOcr2ProcessorKwargs
|
from transformers.processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
class GotOcr2ProcessorKwargs(ProcessingKwargs, total=False):
text_kwargs: GotOcr2TextKwargs
images_kwargs: GotOcr2ImagesKwargs
_defaults = {'text_kwargs': {'padding': False, 'format': False}, 'images_kwargs': {'num_image_tokens': 256, 'multi_page': False, 'crop_to_patches': False, 'min_patches': 1, 'max_patches': 12}}
|
class GotOcr2ProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0 | 16 | 2 | 15 | 0 | 4 | 2 | 3 | 0 | 3 | 0 | 0
|
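The `_defaults` mapping above is what `_merge_kwargs` falls back to when the caller omits a kwarg. The real merge also consults the tokenizer's init kwargs; the default-filling idea alone reduces to a plain dict merge:

```python
# Caller-supplied kwargs win; everything else comes from _defaults.
defaults = {"num_image_tokens": 256, "multi_page": False, "crop_to_patches": False,
            "min_patches": 1, "max_patches": 12}
caller = {"crop_to_patches": True, "max_patches": 6}
merged = {**defaults, **caller}
print(merged)
# {'num_image_tokens': 256, 'multi_page': False, 'crop_to_patches': True,
#  'min_patches': 1, 'max_patches': 6}
```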
2,777
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/got_ocr2/processing_got_ocr2.py
|
transformers.models.got_ocr2.processing_got_ocr2.GotOcr2TextKwargs
|
from transformers.processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from typing import Optional, Union
class GotOcr2TextKwargs(TextKwargs, total=False):
format: Optional[bool]
|
class GotOcr2TextKwargs(TextKwargs, total=False):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0
|
2,778
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/configuration_gpt2.py
|
transformers.models.gpt2.configuration_gpt2.GPT2Config
|
from ...configuration_utils import PretrainedConfig
class GPT2Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`GPT2Model`] or a [`TFGPT2Model`]. It is used to
instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the GPT-2
[openai-community/gpt2](https://huggingface.co/openai-community/gpt2) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50257):
Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`].
n_positions (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_embd (`int`, *optional*, defaults to 768):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
n_inner (`int`, *optional*):
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
activation_function (`str`, *optional*, defaults to `"gelu_new"`):
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
resid_pdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
summary_type (`string`, *optional*, defaults to `"cls_index"`):
Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
[`TFGPT2DoubleHeadsModel`].
Has to be one of the following options:
- `"last"`: Take the last token hidden state (like XLNet).
- `"first"`: Take the first token hidden state (like BERT).
- `"mean"`: Take the mean of all tokens hidden states.
- `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- `"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (`bool`, *optional*, defaults to `True`):
Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
[`TFGPT2DoubleHeadsModel`].
Whether or not to add a projection after the vector extraction.
summary_activation (`str`, *optional*):
Argument used when doing sequence summary. Used for the multiple choice head in
[`GPT2DoubleHeadsModel`].
Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
[`TFGPT2DoubleHeadsModel`].
Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
summary_first_dropout (`float`, *optional*, defaults to 0.1):
Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
[`TFGPT2DoubleHeadsModel`].
The dropout ratio to be used after the projection and activation.
scale_attn_weights (`bool`, *optional*, defaults to `True`):
Scale attention weights by dividing by sqrt(hidden_size).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
bos_token_id (`int`, *optional*, defaults to 50256):
Id of the beginning of sentence token in the vocabulary.
eos_token_id (`int`, *optional*, defaults to 50256):
Id of the end of sentence token in the vocabulary.
scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
dot-product/softmax to float() when training with mixed precision.
Example:
```python
>>> from transformers import GPT2Config, GPT2Model
>>> # Initializing a GPT2 configuration
>>> configuration = GPT2Config()
>>> # Initializing a model (with random weights) from the configuration
>>> model = GPT2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'gpt2'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}
def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function='gelu_new', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
self.reorder_and_upcast_attn = reorder_and_upcast_attn
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
|
class GPT2Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`GPT2Model`] or a [`TFGPT2Model`]. It is used to
instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the GPT-2
[openai-community/gpt2](https://huggingface.co/openai-community/gpt2) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50257):
Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`].
n_positions (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_embd (`int`, *optional*, defaults to 768):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
n_inner (`int`, *optional*):
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
activation_function (`str`, *optional*, defaults to `"gelu_new"`):
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
resid_pdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
summary_type (`string`, *optional*, defaults to `"cls_index"`):
Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
[`TFGPT2DoubleHeadsModel`].
Has to be one of the following options:
- `"last"`: Take the last token hidden state (like XLNet).
- `"first"`: Take the first token hidden state (like BERT).
- `"mean"`: Take the mean of all tokens hidden states.
- `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- `"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (`bool`, *optional*, defaults to `True`):
Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
[`TFGPT2DoubleHeadsModel`].
Whether or not to add a projection after the vector extraction.
summary_activation (`str`, *optional*):
Argument used when doing sequence summary. Used for the multiple choice head in
[`GPT2DoubleHeadsModel`].
Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
[`TFGPT2DoubleHeadsModel`].
Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
summary_first_dropout (`float`, *optional*, defaults to 0.1):
Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
[`TFGPT2DoubleHeadsModel`].
The dropout ratio to be used after the projection and activation.
scale_attn_weights (`bool`, *optional*, defaults to `True`):
Scale attention weights by dividing by sqrt(hidden_size).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
bos_token_id (`int`, *optional*, defaults to 50256):
Id of the beginning of sentence token in the vocabulary.
eos_token_id (`int`, *optional*, defaults to 50256):
Id of the end of sentence token in the vocabulary.
scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
dot-product/softmax to float() when training with mixed precision.
Example:
```python
>>> from transformers import GPT2Config, GPT2Model
>>> # Initializing a GPT2 configuration
>>> configuration = GPT2Config()
>>> # Initializing a model (with random weights) from the configuration
>>> model = GPT2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function='gelu_new', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
pass
| 2 | 1 | 53 | 2 | 51 | 0 | 1 | 1.37 | 1 | 1 | 0 | 0 | 1 | 23 | 1 | 1 | 160 | 18 | 60 | 54 | 32 | 82 | 29 | 28 | 27 | 1 | 1 | 0 | 1
|
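Beyond the doctest in the docstring, the `attribute_map` defined in the record makes the GPT-2-specific names reachable under the generic transformers aliases; this follows directly from the class attributes shown above:

```python
from transformers import GPT2Config

config = GPT2Config(n_embd=512, n_layer=6, n_head=8)
# attribute_map aliases the GPT-2 names to the generic ones (and vice versa).
assert config.hidden_size == config.n_embd == 512
assert config.num_hidden_layers == config.n_layer == 6
assert config.num_attention_heads == config.n_head == 8
assert config.max_position_embeddings == config.n_positions == 1024
```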
2,779
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/configuration_gpt2.py
|
transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
|
from ... import PreTrainedTokenizer, is_torch_available
from collections import OrderedDict
from typing import Any, Optional
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...configuration_utils import PretrainedConfig
from collections.abc import Mapping
class GPT2OnnxConfig(OnnxConfigWithPast):
def __init__(self, config: PretrainedConfig, task: str='default', patching_specs: Optional[list[PatchingSpec]]=None, use_past: bool=False):
super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
if not getattr(self._config, 'pad_token_id', None):
self._config.pad_token_id = 0
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
self.fill_with_past_key_values_(common_inputs, direction='inputs')
common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def num_layers(self) -> int:
return self._config.n_layer
@property
def num_attention_heads(self) -> int:
return self._config.n_head
def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair)
ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
batch, seqlen = common_inputs['input_ids'].shape
past_key_values_length = seqlen + 2
past_shape = (batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads)
ordered_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)]
ordered_inputs['attention_mask'] = common_inputs['attention_mask']
if self.use_past:
mask_dtype = ordered_inputs['attention_mask'].dtype
ordered_inputs['attention_mask'] = torch.cat([ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
return ordered_inputs
@property
def default_onnx_opset(self) -> int:
return 13
|
class GPT2OnnxConfig(OnnxConfigWithPast):
def __init__(self, config: PretrainedConfig, task: str='default', patching_specs: Optional[list[PatchingSpec]]=None, use_past: bool=False):
pass
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def num_layers(self) -> int:
pass
@property
def num_attention_heads(self) -> int:
pass
def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
pass
@property
def default_onnx_opset(self) -> int:
pass
| 11 | 0 | 12 | 1 | 10 | 1 | 2 | 0.06 | 1 | 8 | 0 | 0 | 6 | 0 | 6 | 6 | 79 | 11 | 64 | 33 | 39 | 4 | 34 | 15 | 26 | 4 | 1 | 2 | 11
|
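A small sketch exercising only the members shown in the record above; it assumes a transformers version that still ships the ONNX export configs, and the import path is taken from the record's module:

```python
from transformers import GPT2Config
from transformers.models.gpt2.configuration_gpt2 import GPT2OnnxConfig

onnx_config = GPT2OnnxConfig(GPT2Config(), use_past=True)
print(onnx_config.num_layers)          # 12
print(onnx_config.default_onnx_opset)  # 13
# With use_past=True, the attention_mask axis covers past + current tokens.
for name, axes in onnx_config.inputs.items():
    print(name, axes)
```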
2,780
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/modeling_gpt2.py
|
transformers.models.gpt2.modeling_gpt2.GPT2Attention
|
from typing import Callable, Optional, Union
from torch import nn
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
from ...utils.deprecation import deprecate_kwarg
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
class GPT2Attention(nn.Module):
def __init__(self, config, is_cross_attention=False, layer_idx=None):
super().__init__()
self.config = config
max_positions = config.max_position_embeddings
self.register_buffer('bias', torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(1, 1, max_positions, max_positions), persistent=False)
self.register_buffer('masked_bias', torch.tensor(-10000.0), persistent=False)
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.split_size = self.embed_dim
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale_attn_weights = config.scale_attn_weights
self.is_cross_attention = is_cross_attention
self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
self.layer_idx = layer_idx
self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
if self.is_cross_attention:
self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
else:
self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.is_causal = True
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
index_attn = torch.cat([index, index + self.split_size, index + 2 * self.split_size])
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
self.split_size = self.split_size // self.num_heads * (self.num_heads - len(heads))
self.num_heads = self.num_heads - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
bsz, num_heads, q_seq_len, dk = query.size()
_, _, k_seq_len, _ = key.size()
attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
scale_factor = 1.0
if self.scale_attn_weights:
scale_factor /= float(value.size(-1)) ** 0.5
if self.scale_attn_by_inverse_layer_idx:
scale_factor /= float(self.layer_idx + 1)
with torch.autocast(query.device.type, enabled=False):
q, k = (query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len))
attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
if not self.is_cross_attention:
query_length, key_length = (query.size(-2), key.size(-2))
causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length]
mask_value = torch.finfo(attn_weights.dtype).min
mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
attn_weights = torch.where(causal_mask, attn_weights, mask_value)
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if attn_weights.dtype != torch.float32:
raise RuntimeError('Error with upcasting, attn_weights does not have dtype torch.float32')
attn_weights = attn_weights.type(value.dtype)
attn_weights = self.attn_dropout(attn_weights)
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2)
return (attn_output, attn_weights)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]], ...]:
is_cross_attention = encoder_hidden_states is not None
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
if is_cross_attention:
if not hasattr(self, 'q_attn'):
raise ValueError('If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`.')
query_states = self.q_attn(hidden_states)
attention_mask = encoder_attention_mask
if past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states, value_states = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
shape_kv = (*key_states.shape[:-1], -1, self.head_dim)
key_states = key_states.view(shape_kv).transpose(1, 2)
value_states = value_states.view(shape_kv).transpose(1, 2)
else:
query_states, key_states, value_states = self.c_attn(hidden_states).split(self.split_size, dim=2)
shape_kv = (*key_states.shape[:-1], -1, self.head_dim)
key_states = key_states.view(shape_kv).transpose(1, 2)
value_states = value_states.view(shape_kv).transpose(1, 2)
shape_q = (*query_states.shape[:-1], -1, self.head_dim)
query_states = query_states.view(shape_q).transpose(1, 2)
if past_key_values is not None and (not is_cross_attention) or (past_key_values is not None and is_cross_attention and (not is_updated)):
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention:
past_key_values.is_updated[self.layer_idx] = True
is_causal = attention_mask is None and query_states.shape[-2] > 1 and (not is_cross_attention)
using_eager = self.config._attn_implementation == 'eager'
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
if using_eager and self.reorder_and_upcast_attn:
attn_output, attn_weights = self._upcast_and_reordered_attn(query_states, key_states, value_states, attention_mask, head_mask)
else:
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, head_mask=head_mask, dropout=self.attn_dropout.p if self.training else 0.0, is_causal=is_causal, **kwargs)
attn_output = attn_output.reshape(*attn_output.shape[:-2], -1).contiguous()
attn_output = self.c_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
return (attn_output, attn_weights)
|
class GPT2Attention(nn.Module):
def __init__(self, config, is_cross_attention=False, layer_idx=None):
pass
def prune_heads(self, heads):
pass
def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]], ...]:
pass
| 6 | 0 | 49 | 8 | 37 | 4 | 6 | 0.11 | 1 | 9 | 1 | 0 | 4 | 17 | 4 | 14 | 199 | 33 | 150 | 57 | 134 | 17 | 107 | 46 | 102 | 10 | 1 | 2 | 22
|
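A standalone forward-pass sketch for the attention module above (eager path, no cache; the tiny config values are arbitrary and chosen only for illustration, and the exact forward signature may vary between transformers versions):

```python
import torch
from transformers import GPT2Config
from transformers.models.gpt2.modeling_gpt2 import GPT2Attention

config = GPT2Config(n_embd=64, n_head=4)
attn = GPT2Attention(config, layer_idx=0).eval()

hidden = torch.randn(1, 10, config.n_embd)  # (batch, seq_len, embed_dim)
with torch.no_grad():
    attn_output, attn_weights = attn(hidden)  # causal self-attention, no mask
print(attn_output.shape)  # torch.Size([1, 10, 64])
```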
2,781
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/modeling_gpt2.py
|
transformers.models.gpt2.modeling_gpt2.GPT2Block
|
from typing import Callable, Optional, Union
from torch import nn
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
from ...modeling_layers import GradientCheckpointingLayer
from ...utils.deprecation import deprecate_kwarg
class GPT2Block(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
hidden_size = config.hidden_size
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = GPT2Attention(config=config, layer_idx=layer_idx)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
self.crossattention = GPT2Attention(config=config, is_cross_attention=True, layer_idx=layer_idx)
self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = GPT2MLP(inner_dim, config)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, **kwargs) -> Union[tuple[torch.Tensor], Optional[tuple[torch.Tensor, tuple[torch.FloatTensor, ...]]]]:
residual = hidden_states
hidden_states = self.ln_1(hidden_states)
attn_output, self_attn_weights = self.attn(hidden_states, past_key_values=past_key_values, cache_position=cache_position, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, **kwargs)
hidden_states = attn_output + residual
if encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
residual = hidden_states
hidden_states = self.ln_cross_attn(hidden_states)
cross_attn_output, cross_attn_weights = self.crossattention(hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
hidden_states = residual + cross_attn_output
residual = hidden_states
hidden_states = self.ln_2(hidden_states)
feed_forward_hidden_states = self.mlp(hidden_states)
hidden_states = residual + feed_forward_hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if encoder_hidden_states is not None:
outputs += (cross_attn_weights,)
return outputs
|
class GPT2Block(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, **kwargs) -> Union[tuple[torch.Tensor], Optional[tuple[torch.Tensor, tuple[torch.FloatTensor, ...]]]]:
pass
| 4 | 0 | 37 | 4 | 32 | 4 | 4 | 0.11 | 1 | 6 | 2 | 0 | 2 | 6 | 2 | 12 | 76 | 8 | 64 | 27 | 51 | 7 | 36 | 17 | 33 | 4 | 1 | 2 | 7
|
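And the corresponding sketch for the full block, exercising the attention + MLP path with the two residual connections visible in `forward` above (same caveats as the attention sketch):

```python
import torch
from transformers import GPT2Config
from transformers.models.gpt2.modeling_gpt2 import GPT2Block

config = GPT2Config(n_embd=64, n_head=4)
block = GPT2Block(config, layer_idx=0).eval()

hidden = torch.randn(2, 5, config.n_embd)
with torch.no_grad():
    (out,) = block(hidden)  # output_attentions=False -> one-element tuple
print(out.shape)  # torch.Size([2, 5, 64])
```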
2,782
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/modeling_gpt2.py
|
transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModel
|
from typing import Callable, Optional, Union
from ...utils import ModelOutput, add_start_docstrings, auto_docstring, logging
from ...generation import GenerationMixin
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import warnings
from torch import nn
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
@auto_docstring(custom_intro='\n The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for\n RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the\n input embeddings, the classification head takes as input the input of a specified classification token index in the\n input sequence).\n ')
class GPT2DoubleHeadsModel(GPT2PreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = GPT2SequenceSummary(config)
self.model_parallel = False
self.device_map = None
self.post_init()
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
warnings.warn("`GPT2DoubleHeadsModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0': 0, 'transformer.h.1': 1, ...}", FutureWarning)
self.device_map = get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) if device_map is None else device_map
assert_device_map(self.device_map, len(self.transformer.h))
self.transformer.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.transformer.first_device)
self.multiple_choice_head = self.multiple_choice_head.to(self.transformer.first_device)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning)
self.transformer.deparallelize()
self.transformer = self.transformer.to('cpu')
self.lm_head = self.lm_head.to('cpu')
self.multiple_choice_head = self.multiple_choice_head.to('cpu')
self.model_parallel = False
torch.cuda.empty_cache()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, mc_token_ids: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, mc_labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, GPT2DoubleHeadsModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input):
Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
1]`.
labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to
`-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`
mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above)
Example:
```python
>>> import torch
>>> from transformers import AutoTokenizer, GPT2DoubleHeadsModel
>>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
>>> model = GPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2")
>>> # Add a [CLS] to the vocabulary (we should train it also!)
>>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"})
>>> # Update the model embeddings with the new vocabulary size
>>> embedding_layer = model.resize_token_embeddings(len(tokenizer))
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
>>> encoded_choices = [tokenizer.encode(s) for s in choices]
>>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
>>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2
>>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1
>>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
>>> lm_logits = outputs.logits
>>> mc_logits = outputs.mc_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, cache_position=cache_position, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = transformer_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.transformer.first_device)
hidden_states = hidden_states.to(self.lm_head.weight.device)
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
mc_loss = None
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
lm_loss = None
if labels is not None:
labels = labels.to(lm_logits.device)
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_loss is not None:
output = (mc_loss,) + output
return (lm_loss,) + output if lm_loss is not None else output
return GPT2DoubleHeadsModelOutput(loss=lm_loss, mc_loss=mc_loss, logits=lm_logits, mc_logits=mc_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
|
@auto_docstring(custom_intro='\n The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for\n RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the\n input embeddings, the classification head takes as input the input of a specified classification token index in the\n input sequence).\n ')
class GPT2DoubleHeadsModel(GPT2PreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
pass
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, mc_token_ids: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, mc_labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, GPT2DoubleHeadsModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input):
Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
1]`.
labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to
`-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`
mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above)
Example:
```python
>>> import torch
>>> from transformers import AutoTokenizer, GPT2DoubleHeadsModel
>>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
>>> model = GPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2")
>>> # Add a [CLS] to the vocabulary (we should train it also!)
>>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"})
>>> # Update the model embeddings with the new vocabulary size
>>> embedding_layer = model.resize_token_embeddings(len(tokenizer))
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
>>> encoded_choices = [tokenizer.encode(s) for s in choices]
>>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
>>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2
>>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1
>>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
>>> lm_logits = outputs.logits
>>> mc_logits = outputs.mc_logits
```'''
pass
| 9 | 1 | 24 | 2 | 16 | 6 | 2 | 0.33 | 2 | 8 | 2 | 0 | 6 | 5 | 7 | 9 | 182 | 24 | 119 | 47 | 87 | 39 | 59 | 24 | 51 | 8 | 2 | 2 | 15
|
2,783
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/modeling_gpt2.py
|
transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput
|
from typing import Callable, Optional, Union
from ...utils import ModelOutput, add_start_docstrings, auto_docstring, logging
from dataclasses import dataclass
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of models predicting if two sentences are consecutive or not.\n ')
class GPT2DoubleHeadsModelOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided):
Multiple choice classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
"""
loss: Optional[torch.FloatTensor] = None
mc_loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
mc_logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of the GPT2 double-heads model (language modeling and multiple-choice classification heads).\n ')
class GPT2DoubleHeadsModelOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided):
Multiple choice classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 5 | 8 | 8 | 7 | 26 | 8 | 8 | 7 | 0 | 1 | 0 | 0
|
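For context on the record above: a minimal sketch of how the fields of `GPT2DoubleHeadsModelOutput` line up, using random stand-in tensors instead of a real forward pass (the shapes follow the field docstrings; the sizes are illustrative assumptions):
```python
import torch
from transformers.models.gpt2.modeling_gpt2 import GPT2DoubleHeadsModelOutput

batch_size, num_choices, seq_len, vocab_size = 1, 2, 10, 50257  # illustrative sizes
out = GPT2DoubleHeadsModelOutput(
    logits=torch.randn(batch_size, num_choices, seq_len, vocab_size),
    mc_logits=torch.randn(batch_size, num_choices),
)
best_choice = out.mc_logits.argmax(-1)           # (batch_size,): index of the preferred choice
next_token = out.logits[:, 0, -1, :].argmax(-1)  # greedy next-token id for choice 0
```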
| 2,784 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/modeling_gpt2.py | transformers.models.gpt2.modeling_gpt2.GPT2ForQuestionAnswering |
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils import ModelOutput, add_start_docstrings, auto_docstring, logging
from typing import Callable, Optional, Union
import torch
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from torch import nn
@auto_docstring
class GPT2ForQuestionAnswering(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = GPT2Model(config)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.model_parallel = False
self.device_map = None
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1).to(start_logits.device)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1).to(end_logits.device)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class GPT2ForQuestionAnswering(GPT2PreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
'''
pass
| 5 | 1 | 43 | 5 | 31 | 7 | 4 | 0.2 | 1 | 4 | 2 | 0 | 2 | 5 | 2 | 4 | 94 | 10 | 70 | 32 | 47 | 14 | 34 | 18 | 31 | 7 | 2 | 2 | 8
|
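A hedged usage sketch for the `GPT2ForQuestionAnswering` record above: the forward pass returns per-token start/end logits, and a span can be read off with an argmax over each. The QA head of the base `openai-community/gpt2` checkpoint is randomly initialized, so the decoded span is meaningless until the head is fine-tuned.
```python
import torch
from transformers import AutoTokenizer, GPT2ForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = GPT2ForQuestionAnswering.from_pretrained("openai-community/gpt2")  # QA head untrained here

question, context = "Who released GPT-2?", "GPT-2 was released by OpenAI in 2019."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Most likely start/end positions, then decode the corresponding span.
start = outputs.start_logits.argmax(-1).item()
end = outputs.end_logits.argmax(-1).item()
answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
```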
| 2,785 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/modeling_gpt2.py | transformers.models.gpt2.modeling_gpt2.GPT2ForSequenceClassification |
from typing import Callable, Optional, Union
from ...utils import ModelOutput, add_start_docstrings, auto_docstring, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
@auto_docstring(custom_intro='\n The GPT2 Model transformer with a sequence classification head on top (linear layer).\n\n [`GPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-1) do.\n\n Since it does classification on the last token, it needs to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ')
class GPT2ForSequenceClassification(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = GPT2Model(config)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
self.model_parallel = False
self.device_map = None
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutputWithPast]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size, sequence_length = input_ids.shape[:2]
else:
batch_size, sequence_length = inputs_embeds.shape[:2]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`')
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
| null | 5 | 1 | 54 | 5 | 45 | 5 | 9 | 0.09 | 1 | 7 | 2 | 0 | 2 | 5 | 2 | 4 | 115 | 10 | 96 | 34 | 73 | 9 | 47 | 19 | 44 | 16 | 2 | 3 | 17
|
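The intro of the `GPT2ForSequenceClassification` record above matters in practice: logits are pooled at the last non-padding token, so batched inputs need a `pad_token_id`. A sketch under the assumption that the eos token is reused for padding (the classification head of the base checkpoint is untrained):
```python
import torch
from transformers import AutoTokenizer, GPT2ForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = GPT2ForSequenceClassification.from_pretrained("openai-community/gpt2", num_labels=2)

# GPT-2 defines no pad token; reuse eos so the model can locate the last real token per row.
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.pad_token_id

inputs = tokenizer(["great movie", "truly terrible movie"], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch_size, num_labels), pooled at the last non-pad token
```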
| 2,786 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/modeling_gpt2.py | transformers.models.gpt2.modeling_gpt2.GPT2ForTokenClassification |
from typing import Callable, Optional, Union
from ...utils import ModelOutput, add_start_docstrings, auto_docstring, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
@auto_docstring
class GPT2ForTokenClassification(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = GPT2Model(config)
if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None:
classifier_dropout = config.classifier_dropout
elif hasattr(config, 'hidden_dropout') and config.hidden_dropout is not None:
classifier_dropout = config.hidden_dropout
else:
classifier_dropout = 0.1
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.model_parallel = False
self.device_map = None
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = transformer_outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.classifier(hidden_states)
loss = None
if labels is not None:
labels = labels.to(logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + transformer_outputs[2:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
|
@auto_docstring
class GPT2ForTokenClassification(GPT2PreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5 | 1 | 39 | 4 | 31 | 4 | 4 | 0.12 | 1 | 5 | 2 | 0 | 2 | 6 | 2 | 4 | 102 | 9 | 83 | 31 | 45 | 10 | 29 | 16 | 26 | 5 | 2 | 1 | 8
|
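A short usage sketch for the `GPT2ForTokenClassification` record above; `num_labels=5` is an arbitrary assumption and the classifier weights of the base checkpoint are untrained. The head emits one logit vector per input token:
```python
import torch
from transformers import AutoTokenizer, GPT2ForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = GPT2ForTokenClassification.from_pretrained("openai-community/gpt2", num_labels=5)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch_size, sequence_length, num_labels)
predicted_ids = logits.argmax(-1)    # one label id per token
```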
| 2,787 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/modeling_gpt2.py | transformers.models.gpt2.modeling_gpt2.GPT2LMHeadModel |
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from torch import nn
import warnings
from ...generation import GenerationMixin
from ...utils import ModelOutput, add_start_docstrings, auto_docstring, logging
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class GPT2LMHeadModel(GPT2PreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config):
super().__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.model_parallel = False
self.device_map = None
self.post_init()
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
warnings.warn("`GPT2LMHeadModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0': 0, 'transformer.h.1': 1, ...}", FutureWarning)
self.device_map = get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) if device_map is None else device_map
assert_device_map(self.device_map, len(self.transformer.h))
self.transformer.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.transformer.first_device)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning)
self.transformer.deparallelize()
self.transformer = self.transformer.to('cpu')
self.lm_head = self.lm_head.to('cpu')
self.model_parallel = False
torch.cuda.empty_cache()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100`
are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, cache_position=cache_position, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = transformer_outputs[0]
if self.model_parallel:
torch.cuda.set_device(self.transformer.first_device)
hidden_states = hidden_states.to(self.lm_head.weight.device)
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions)
|
@auto_docstring(custom_intro='\n The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class GPT2LMHeadModel(GPT2PreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
pass
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100`
are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`.
'''
pass
| 9 | 1 | 18 | 1 | 15 | 2 | 2 | 0.13 | 2 | 8 | 2 | 0 | 6 | 4 | 7 | 9 | 144 | 15 | 114 | 41 | 78 | 15 | 44 | 18 | 36 | 6 | 2 | 1 | 13
|
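Because the labels **are shifted** inside `GPT2LMHeadModel` (per its docstring above), passing the input ids back in as `labels` yields a next-token cross-entropy directly. A minimal sketch:
```python
import torch
from transformers import AutoTokenizer, GPT2LMHeadModel

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, labels=inputs["input_ids"])  # shifting happens inside the model

loss = outputs.loss                       # mean next-token cross-entropy
perplexity = torch.exp(loss)              # scalar perplexity on this sentence
next_id = outputs.logits[0, -1].argmax()  # greedy continuation token
```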
| 2,788 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/modeling_gpt2.py | transformers.models.gpt2.modeling_gpt2.GPT2MLP |
import torch
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from torch import nn
from typing import Callable, Optional, Union
from ...activations import ACT2FN, get_activation
class GPT2MLP(nn.Module):
def __init__(self, intermediate_size, config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = Conv1D(intermediate_size, embed_dim)
self.c_proj = Conv1D(embed_dim, intermediate_size)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class GPT2MLP(nn.Module):
def __init__(self, intermediate_size, config):
pass
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
pass
| 3 | 0 | 7 | 0 | 7 | 0 | 1 | 0 | 1 | 2 | 1 | 0 | 2 | 4 | 2 | 12 | 15 | 1 | 14 | 8 | 11 | 0 | 14 | 8 | 11 | 1 | 1 | 0 | 2
|
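Functionally, the `GPT2MLP` record above is a position-wise two-layer feed-forward block: `Conv1D(nf, nx)` acts like `nn.Linear(nx, nf)` with transposed weight storage. A rough equivalent sketch, with the sizes and a plain `nn.GELU` (standing in for `ACT2FN[config.activation_function]`) as assumptions:
```python
import torch
from torch import nn

hidden_size, intermediate_size = 768, 3072  # GPT-2 small style: intermediate = 4 * hidden
mlp = nn.Sequential(
    nn.Linear(hidden_size, intermediate_size),  # plays the role of c_fc
    nn.GELU(),                                  # stand-in for the configured activation
    nn.Linear(intermediate_size, hidden_size),  # plays the role of c_proj
    nn.Dropout(0.1),                            # resid_pdrop
)
out = mlp(torch.randn(2, 10, hidden_size))      # shape preserved: (2, 10, 768)
```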
| 2,789 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/modeling_gpt2.py | transformers.models.gpt2.modeling_gpt2.GPT2Model |
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from torch import nn
import warnings
from ...utils import ModelOutput, add_start_docstrings, auto_docstring, logging
from ...masking_utils import create_causal_mask
from typing import Callable, Optional, Union
@auto_docstring
class GPT2Model(GPT2PreTrainedModel):
_supports_param_buffer_assignment = False
def __init__(self, config):
super().__init__(config)
self.embed_dim = config.hidden_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
self._attn_implementation = config._attn_implementation
self.post_init()
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
warnings.warn("`GPT2Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1, ...}", FutureWarning)
self.device_map = get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
assert_device_map(self.device_map, len(self.h))
self.model_parallel = True
self.first_device = 'cpu' if 'cpu' in self.device_map else 'cuda:' + str(min(self.device_map.keys()))
self.last_device = 'cuda:' + str(max(self.device_map.keys()))
self.wte = self.wte.to(self.first_device)
self.wpe = self.wpe.to(self.first_device)
for k, v in self.device_map.items():
for block in v:
cuda_device = 'cuda:' + str(k)
self.h[block] = self.h[block].to(cuda_device)
self.ln_f = self.ln_f.to(self.last_device)
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
warnings.warn('Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.', FutureWarning)
self.model_parallel = False
self.device_map = None
self.first_device = 'cpu'
self.last_device = 'cpu'
self.wte = self.wte.to('cpu')
self.wpe = self.wpe.to('cpu')
for index in range(len(self.h)):
self.h[index] = self.h[index].to('cpu')
self.ln_f = self.ln_f.to('cpu')
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if use_cache:
if past_key_values is None:
past_key_values = DynamicCache(config=self.config)
elif isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.53.0. You should pass an instance of `Cache` instead, e.g. `past_key_values=DynamicCache.from_legacy_cache(past_key_values)`.')
past_key_values = DynamicCache.from_legacy_cache(past_key_values)
if self.config.add_cross_attention and (not isinstance(past_key_values, EncoderDecoderCache)):
past_key_values = EncoderDecoderCache(past_key_values, DynamicCache(config=self.config))
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds.to(inputs_embeds.device)
if attention_mask is not None and attention_mask.ndim < 4:
attention_mask = attention_mask.view(batch_size, -1)
causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
_use_sdpa = self._attn_implementation == 'sdpa' and output_attentions is False and (head_mask is None)
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
if _use_sdpa:
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(mask=encoder_attention_mask, dtype=inputs_embeds.dtype, tgt_len=input_shape[-1])
elif self._attn_implementation != 'flash_attention_2':
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.h):
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
if isinstance(head_mask, torch.Tensor):
head_mask = head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(hidden_states, past_key_values if not (self.gradient_checkpointing and self.training) else None, cache_position, causal_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, **kwargs)
hidden_states = outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[2],)
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and 'cuda:' + str(k) != self.last_device:
hidden_states = hidden_states.to('cuda:' + str(k + 1))
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(output_shape)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
past_key_values = past_key_values if use_cache else None
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
|
@auto_docstring
class GPT2Model(GPT2PreTrainedModel):
def __init__(self, config):
pass
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
pass
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
'''
pass
| 12 | 2 | 39 | 4 | 31 | 4 | 8 | 0.14 | 1 | 10 | 2 | 0 | 7 | 12 | 7 | 9 | 288 | 34 | 224 | 63 | 193 | 31 | 141 | 45 | 133 | 46 | 2 | 4 | 58
|
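The `past_key_values`/`cache_position` plumbing in the `GPT2Model.forward` above is what makes incremental decoding cheap: after one full pass, only the new tokens need to be fed in. A usage sketch (no padding, so the attention mask is omitted):
```python
import torch
from transformers import AutoTokenizer, GPT2Model

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = GPT2Model.from_pretrained("openai-community/gpt2")

inputs = tokenizer("Hello, my dog", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs, use_cache=True)
hidden = out.last_hidden_state  # (1, seq_len, hidden_size)

# Second pass: feed only the new token and reuse the cached keys/values.
new_ids = tokenizer(" is", return_tensors="pt").input_ids
with torch.no_grad():
    out2 = model(input_ids=new_ids, past_key_values=out.past_key_values, use_cache=True)
```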
| 2,790 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/modeling_gpt2.py | transformers.models.gpt2.modeling_gpt2.GPT2PreTrainedModel |
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import math
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from torch import nn
from .configuration_gpt2 import GPT2Config
from ...utils import ModelOutput, add_start_docstrings, auto_docstring, logging
@auto_docstring
class GPT2PreTrainedModel(PreTrainedModel):
config: GPT2Config
base_model_prefix = 'transformer'
is_parallelizable = True
supports_gradient_checkpointing = True
_no_split_modules = ['GPT2Block']
_skip_keys_device_placement = 'past_key_values'
_supports_flash_attn = True
_supports_sdpa = True
_supports_attention_backend = True
_can_compile_fullgraph = True
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, Conv1D)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
for name, p in module.named_parameters():
if name == 'c_proj.weight':
p.data.normal_(mean=0.0, std=self.config.initializer_range / math.sqrt(2 * self.config.n_layer))
|
@auto_docstring
class GPT2PreTrainedModel(PreTrainedModel):
def __init__(self, *inputs, **kwargs):
pass
def _init_weights(self, module):
'''Initialize the weights.'''
pass
| 4 | 1 | 14 | 1 | 9 | 5 | 5 | 0.52 | 1 | 2 | 1 | 6 | 2 | 0 | 2 | 2 | 45 | 4 | 27 | 13 | 24 | 14 | 25 | 13 | 22 | 8 | 1 | 2 | 9
|
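The `c_proj.weight` branch of `_init_weights` above applies GPT-2's depth-scaled initialization: residual projections are drawn with their standard deviation shrunk by `1/sqrt(2 * n_layer)` (two residual additions per block), keeping residual-stream variance roughly constant with depth. A sketch of just that formula, with config values assumed from GPT-2 small:
```python
import math
import torch

initializer_range, n_layer = 0.02, 12  # GPT-2 small defaults (assumed)
std = initializer_range / math.sqrt(2 * n_layer)

w = torch.empty(768, 768)
w.normal_(mean=0.0, std=std)  # what _init_weights applies to every c_proj.weight
print(round(std, 5))          # ~0.00408, vs 0.02 for all other weights
```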
| 2,791 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/tokenization_gpt2.py | transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer |
from typing import Optional
import json
import os
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
import regex as re
class GPT2Tokenizer(PreTrainedTokenizer):
"""
Construct a GPT-2 tokenizer. Based on byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import GPT2Tokenizer
>>> tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
>>> tokenizer("Hello world")["input_ids"]
[15496, 995]
>>> tokenizer(" Hello world")["input_ids"]
[18435, 995]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
pad_token (`str`, *optional*):
The token used for padding, for example when batching sequences of different lengths.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows the leading word to be treated just like any
other word (the GPT2 tokenizer detects the beginning of words by the preceding space).
add_bos_token (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial beginning-of-sentence token to the input. This allows the leading word to be
treated just like any other word.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, merges_file, errors='replace', unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', pad_token=None, add_prefix_space=False, add_bos_token=False, **kwargs):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
self.add_bos_token = add_bos_token
with open(vocab_file, encoding='utf-8') as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding='utf-8') as merges_handle:
bpe_merges = merges_handle.read().split('\n')[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")
super().__init__(errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, add_bos_token=add_bos_token, **kwargs)
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
if self.add_bos_token:
bos_token_ids = [self.bos_token_id]
else:
bos_token_ids = []
output = bos_token_ids + token_ids_0
if token_ids_1 is None:
return output
return output + bos_token_ids + token_ids_1
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if not self.add_bos_token:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False)
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0)
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1)
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
return bpe_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) to an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) to a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) into a single string."""
text = ''.join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
index = 0
with open(merge_file, 'w', encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
index = token_index
writer.write(' '.join(bpe_tokens) + '\n')
index += 1
return (vocab_file, merge_file)
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
if is_split_into_words or add_prefix_space:
text = ' ' + text
return (text, kwargs)
|
class GPT2Tokenizer(PreTrainedTokenizer):
'''
Construct a GPT-2 tokenizer. Based on byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import GPT2Tokenizer
>>> tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
>>> tokenizer("Hello world")["input_ids"]
[15496, 995]
>>> tokenizer(" Hello world")["input_ids"]
[18435, 995]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
</Tip>
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
pad_token (`str`, *optional*):
The token used for padding, for example when batching sequences of different lengths.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows the leading word to be treated just like any
other word (the GPT2 tokenizer detects the beginning of words by the preceding space).
add_bos_token (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial beginning-of-sentence token to the input. This allows the leading word to be
treated just like any other word.
'''
def __init__(self, vocab_file, merges_file, errors='replace', unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', pad_token=None, add_prefix_space=False, add_bos_token=False, **kwargs):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def bpe(self, token):
pass
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def _tokenize(self, text):
'''Tokenize a string.'''
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) to an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) to a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) into a single string.'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
pass
| 14 | 6 | 16 | 1 | 13 | 2 | 3 | 0.41 | 1 | 11 | 0 | 0 | 12 | 10 | 12 | 101 | 257 | 40 | 156 | 62 | 128 | 64 | 116 | 43 | 103 | 9 | 3 | 3 | 36
|
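The `bpe()` method in the `GPT2Tokenizer` record above is a greedy lowest-rank merge loop. A self-contained toy sketch of the same loop; the two-entry merge table is an assumption for illustration, not the real GPT-2 merges file:
```python
def get_pairs(word):
    """All adjacent symbol pairs in a word represented as a tuple of symbols."""
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}  # toy merge table: lower rank merges first

word = tuple("low")  # ("l", "o", "w")
while len(word) > 1:
    bigram = min(get_pairs(word), key=lambda p: bpe_ranks.get(p, float("inf")))
    if bigram not in bpe_ranks:
        break  # no learnable merge left
    first, second = bigram
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)

print(word)  # ("low",): "l"+"o" -> "lo", then "lo"+"w" -> "low"
```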
| 2,792 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt2/tokenization_gpt2_fast.py | transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast |
from typing import Optional
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...tokenization_utils_base import BatchEncoding
from .tokenization_gpt2 import GPT2Tokenizer
class GPT2TokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import GPT2TokenizerFast
>>> tokenizer = GPT2TokenizerFast.from_pretrained("openai-community/gpt2")
>>> tokenizer("Hello world")["input_ids"]
[15496, 995]
>>> tokenizer(" Hello world")["input_ids"]
[18435, 995]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
merges_file (`str`, *optional*):
Path to the merges file.
tokenizer_file (`str`, *optional*):
Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
contains everything needed to load the tokenizer.
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows the leading word to be treated just like
any other word. (The GPT-2 tokenizer detects the beginning of words by the preceding space.)
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = GPT2Tokenizer
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, **kwargs):
super().__init__(vocab_file=vocab_file, merges_file=merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
self.add_bos_token = kwargs.pop('add_bos_token', False)
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
is_split_into_words = kwargs.get('is_split_into_words', False)
assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
return super()._batch_encode_plus(*args, **kwargs)
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
is_split_into_words = kwargs.get('is_split_into_words', False)
assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'
return super()._encode_plus(*args, **kwargs)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
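To make the `add_prefix_space` requirement above concrete, here is a minimal usage sketch for pretokenized input; it assumes a working `transformers` install with the `openai-community/gpt2` tokenizer files available locally or via the Hub.

```python
# Minimal sketch: encoding pretokenized input with GPT2TokenizerFast.
from transformers import GPT2TokenizerFast

# Without add_prefix_space=True, _batch_encode_plus above raises an AssertionError
# for pretokenized input, so we opt in at instantiation time.
tokenizer = GPT2TokenizerFast.from_pretrained("openai-community/gpt2", add_prefix_space=True)

encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
print(encoding["input_ids"])  # each word is encoded as if preceded by a space
```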
|
class GPT2TokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import GPT2TokenizerFast
>>> tokenizer = GPT2TokenizerFast.from_pretrained("openai-community/gpt2")
>>> tokenizer("Hello world")["input_ids"]
[15496, 995]
>>> tokenizer(" Hello world")["input_ids"]
[18435, 995]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
merges_file (`str`, *optional*):
Path to the merges file.
tokenizer_file (`str`, *optional*):
Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
contains everything needed to load the tokenizer.
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows the leading word to be treated just like
any other word. (The GPT-2 tokenizer detects the beginning of words by the preceding space.)
'''
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, **kwargs):
pass
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
pass
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 5 | 1 | 11 | 1 | 10 | 0 | 1 | 0.91 | 1 | 4 | 1 | 0 | 4 | 1 | 4 | 92 | 101 | 19 | 43 | 22 | 28 | 39 | 18 | 12 | 13 | 1 | 3 | 0 | 4 |

2,793 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py | transformers.models.gpt_bigcode.configuration_gpt_bigcode.GPTBigCodeConfig |
from ...configuration_utils import PretrainedConfig
class GPTBigCodeConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`GPTBigCodeModel`]. It is used to instantiate a
GPTBigCode model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the GPTBigCode
[gpt_bigcode](https://huggingface.co/gpt_bigcode) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50257):
Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GPTBigCodeModel`].
n_positions (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_embd (`int`, *optional*, defaults to 768):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
n_inner (`int`, *optional*, defaults to `None`):
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
activation_function (`str`, *optional*, defaults to `"gelu_pytorch_tanh"`):
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new",
"gelu_pytorch_tanh"]`.
resid_pdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_attn_weights (`bool`, *optional*, defaults to `True`):
Scale attention weights by dividing by `sqrt(hidden_size)`.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
Whether to call the fused softmax in float32.
scale_attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
Whether to scale the attention softmax in float32.
multi_query (`bool`, *optional*, defaults to `True`):
Whether to use Multi-Query Attention (`True`) or Multi-Head Attention (`False`).
Example:
```python
>>> from transformers import GPTBigCodeConfig, GPTBigCodeModel
>>> # Initializing a GPTBigCode configuration
>>> configuration = GPTBigCodeConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = GPTBigCodeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'gpt_bigcode'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}
def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function='gelu_pytorch_tanh', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.attention_softmax_in_fp32 = attention_softmax_in_fp32
self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
self.multi_query = multi_query
self.num_key_value_heads = 1 if multi_query else n_head
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
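As a quick illustration of the `multi_query` switch in the constructor above, this sketch (assuming `transformers` is installed) shows how `num_key_value_heads` is derived:

```python
# Sketch: multi_query controls how many key/value heads the config reports.
from transformers import GPTBigCodeConfig

mqa = GPTBigCodeConfig(n_head=16, multi_query=True)
mha = GPTBigCodeConfig(n_head=16, multi_query=False)

print(mqa.num_key_value_heads)  # 1  -> one key/value head shared by all 16 query heads
print(mha.num_key_value_heads)  # 16 -> one key/value head per query head
```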
|
class GPTBigCodeConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`GPTBigCodeModel`]. It is used to instantiate a
GPTBigCode model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the GPTBigCode
[gpt_bigcode](https://huggingface.co/gpt_bigcode) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50257):
Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GPTBigCodeModel`].
n_positions (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_embd (`int`, *optional*, defaults to 768):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
n_inner (`int`, *optional*, defaults to `None`):
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
activation_function (`str`, *optional*, defaults to `"gelu_pytorch_tanh"`):
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new",
"gelu_pytorch_tanh"]`.
resid_pdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_attn_weights (`bool`, *optional*, defaults to `True`):
Scale attention weights by dividing by `sqrt(hidden_size)`.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
Whether to call the fused softmax in float32.
scale_attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
Whether to scale the attention softmax in float32.
multi_query (`bool`, *optional*, defaults to `True`):
Whether to use Multi-Query Attention (`True`) or Multi-Head Attention (`False`).
Example:
```python
>>> from transformers import GPTBigCodeConfig, GPTBigCodeModel
>>> # Initializing a GPTBigCode configuration
>>> configuration = GPTBigCodeConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = GPTBigCodeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function='gelu_pytorch_tanh', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
pass
| 2 | 1 | 45 | 2 | 43 | 0 | 1 | 1.06 | 1 | 1 | 0 | 0 | 1 | 19 | 1 | 1 | 118 | 11 | 52 | 46 | 28 | 55 | 25 | 24 | 23 | 1 | 1 | 0 | 1 |

2,794 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | transformers.models.gpt_bigcode.modeling_gpt_bigcode.GPTBigCodeAttention |
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from typing import Callable, Optional, Union
from torch import nn
class GPTBigCodeAttention(nn.Module):
def __init__(self, config, is_cross_attention=False, layer_idx=None):
super().__init__()
self.config = config
self.mask_value = None
self.multi_query = config.multi_query
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.kv_heads = 1 if self.multi_query else self.num_heads
self.kv_dim = self.kv_heads * self.head_dim
self.num_key_value_groups = self.num_heads // self.kv_heads
self.split_size = self.embed_dim
self.is_causal = True
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale_attn_weights = config.scale_attn_weights
self.scaling = self.head_dim ** (-0.5) if config.scale_attn_weights else 1.0
self.is_cross_attention = is_cross_attention
self.layer_idx = layer_idx
self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
self.scale_attention_softmax_in_fp32 = config.scale_attention_softmax_in_fp32 and config.attention_softmax_in_fp32
self.attn_pdrop = config.attn_pdrop
if self.is_cross_attention:
if self.multi_query:
raise NotImplementedError('Multi-Query Attention not supported for cross_attention')
self.c_attn = nn.Linear(self.embed_dim, 2 * self.embed_dim)
self.q_attn = nn.Linear(self.embed_dim, self.embed_dim)
else:
self.c_attn = nn.Linear(self.embed_dim, self.embed_dim + 2 * self.kv_dim)
self.c_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.attn_dropout = config.attn_pdrop
self.resid_dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states: torch.Tensor, layer_past: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor, Optional[torch.Tensor]], tuple[torch.Tensor, Optional[torch.Tensor], tuple[torch.Tensor, ...]]]:
input_shape = hidden_states.shape[:-1]
if layer_past is not None:
if isinstance(layer_past, EncoderDecoderCache):
is_updated = layer_past.is_updated.get(self.layer_idx)
if self.is_cross_attention:
curr_past_key_value = layer_past.cross_attention_cache
else:
curr_past_key_value = layer_past.self_attention_cache
else:
curr_past_key_value = layer_past
if self.is_cross_attention:
if not hasattr(self, 'q_attn') or not self.is_cross_attention:
raise ValueError('If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`.')
if layer_past is not None and is_updated:
key = curr_past_key_value.layers[self.layer_idx].keys
value = curr_past_key_value.layers[self.layer_idx].values
else:
query = self.q_attn(hidden_states).view(*input_shape, -1, self.head_dim).transpose(1, 2)
key, value = self.c_attn(encoder_hidden_states).split((self.head_dim, self.head_dim), dim=-1)
elif self.multi_query:
query, key, value = self.c_attn(hidden_states).unsqueeze(1).split((self.embed_dim, self.kv_dim, self.kv_dim), dim=3)
query = query.view(*input_shape, -1, self.head_dim).transpose(1, 2)
else:
query, key, value = self.c_attn(hidden_states).view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim).transpose(1, 2).split(3 * [self.head_dim], dim=3)
if layer_past is not None:
cache_position = cache_position if not self.is_cross_attention else None
key, value = curr_past_key_value.update(key, value, self.layer_idx, {'cache_position': cache_position})
if self.is_cross_attention:
layer_past.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query, key, value, attention_mask, dropout=0.0 if not self.training else self.attn_dropout, scaling=self.scaling, head_mask=head_mask, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.c_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
return (attn_output, attn_weights)
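The projection sizes set up in `__init__` above follow directly from the head arithmetic; this standalone sketch (plain Python, using the default-config numbers) walks through it for both attention variants:

```python
# Worked example of the head arithmetic in GPTBigCodeAttention.__init__.
embed_dim, num_heads = 768, 12           # defaults: n_embd=768, n_head=12
head_dim = embed_dim // num_heads        # 64

for multi_query in (True, False):
    kv_heads = 1 if multi_query else num_heads
    kv_dim = kv_heads * head_dim         # 64 (MQA) vs 768 (MHA)
    c_attn_out = embed_dim + 2 * kv_dim  # fused q/k/v projection width: 896 vs 2304
    print(f"multi_query={multi_query}: kv_heads={kv_heads}, c_attn out={c_attn_out}")
```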
|
class GPTBigCodeAttention(nn.Module):
def __init__(self, config, is_cross_attention=False, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, layer_past: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor, Optional[torch.Tensor]], tuple[torch.Tensor, Optional[torch.Tensor], tuple[torch.Tensor, ...]]]:
pass
| 3 | 0 | 46 | 6 | 34 | 6 | 7 | 0.16 | 1 | 5 | 0 | 2 | 4 | 21 | 4 | 14 | 186 | 28 | 137 | 60 | 119 | 22 | 103 | 47 | 98 | 12 | 1 | 2 | 28 |

2,795 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | transformers.models.gpt_bigcode.modeling_gpt_bigcode.GPTBigCodeBlock |
from typing import Callable, Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
from torch import nn
class GPTBigCodeBlock(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
hidden_size = config.hidden_size
self.inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = GPTBigCodeAttention(config, layer_idx=layer_idx)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
if config.multi_query:
raise NotImplementedError('Cross-attention not implemented for MQA')
self.crossattention = GPTBigCodeAttention(config, is_cross_attention=True, layer_idx=layer_idx)
self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = GPTBigCodeMLP(self.inner_dim, config)
def forward(self, hidden_states: Optional[tuple[torch.Tensor]], layer_past: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
residual = hidden_states
hidden_states = self.ln_1(hidden_states)
attn_outputs = self.attn(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, **kwargs)
attn_output = attn_outputs[0]
outputs = attn_outputs[1:]
hidden_states = attn_output + residual
if encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
residual = hidden_states
hidden_states = self.ln_cross_attn(hidden_states)
cross_attn_outputs = self.crossattention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, cache_position=cache_position, **kwargs)
attn_output = cross_attn_outputs[0]
hidden_states = residual + attn_output
outputs = outputs + cross_attn_outputs[1:]
residual = hidden_states
hidden_states = self.ln_2(hidden_states)
feed_forward_hidden_states = self.mlp(hidden_states)
hidden_states = residual + feed_forward_hidden_states
return (hidden_states,) + outputs
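The block above follows the standard pre-LayerNorm residual pattern (normalize, transform, add back the untouched input). A self-contained toy sketch of that pattern, with hypothetical sizes and no HF dependencies:

```python
# Toy sketch of the pre-LayerNorm residual pattern used in GPTBigCodeBlock.forward.
import torch
from torch import nn

hidden_size = 8  # hypothetical toy width
ln = nn.LayerNorm(hidden_size)
mlp = nn.Sequential(
    nn.Linear(hidden_size, 4 * hidden_size),  # inner_dim defaults to 4 * hidden_size
    nn.GELU(),
    nn.Linear(4 * hidden_size, hidden_size),
)

x = torch.randn(2, 5, hidden_size)
out = x + mlp(ln(x))  # normalize first, then add the residual back
print(out.shape)      # torch.Size([2, 5, 8])
```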
|
class GPTBigCodeBlock(nn.Module):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states: Optional[tuple[torch.Tensor]], layer_past: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
pass
| 3 | 0 | 43 | 6 | 35 | 4 | 4 | 0.1 | 1 | 6 | 1 | 0 | 2 | 7 | 2 | 12 | 87 | 12 | 71 | 30 | 55 | 7 | 38 | 17 | 35 | 4 | 1 | 2 | 8 |

2,796 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | transformers.models.gpt_bigcode.modeling_gpt_bigcode.GPTBigCodeForCausalLM |
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast, TokenClassifierOutput
from ...generation import GenerationMixin
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
from typing import Callable, Optional, Union
from ...utils import auto_docstring, can_return_tuple, logging
@auto_docstring(custom_intro='\n The GPT_BIGCODE Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class GPTBigCodeForCausalLM(GPTBigCodePreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config):
super().__init__(config)
self.transformer = GPTBigCodeModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
"""
input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.Tensor` of shape `(batch_size, input_ids_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
loss = self.loss_function(lm_logits, labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions)
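A minimal usage sketch of the causal LM head, following the `labels = input_ids` convention documented above; `bigcode/gpt_bigcode-santacoder` is one example checkpoint, and any GPTBigCode checkpoint should work:

```python
# Sketch: computing an LM loss with labels shifted internally by the model.
import torch
from transformers import AutoTokenizer, GPTBigCodeForCausalLM

name = "bigcode/gpt_bigcode-santacoder"  # example checkpoint; swap in any GPTBigCode model
tokenizer = AutoTokenizer.from_pretrained(name)
model = GPTBigCodeForCausalLM.from_pretrained(name)

inputs = tokenizer("def hello_world():", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, labels=inputs["input_ids"])  # labels are shifted inside
print(outputs.loss)
```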
|
@auto_docstring(custom_intro='\n The GPT_BIGCODE Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class GPTBigCodeForCausalLM(GPTBigCodePreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
'''
input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.Tensor` of shape `(batch_size, input_ids_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
'''
pass
| 5 | 1 | 22 | 2 | 17 | 3 | 3 | 0.17 | 2 | 6 | 2 | 0 | 6 | 2 | 7 | 9 | 169 | 20 | 127 | 45 | 93 | 22 | 61 | 24 | 53 | 8 | 2 | 2 | 21 |

2,797 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | transformers.models.gpt_bigcode.modeling_gpt_bigcode.GPTBigCodeForSequenceClassification |
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils import auto_docstring, can_return_tuple, logging
from typing import Callable, Optional, Union
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast, TokenClassifierOutput
@auto_docstring(custom_intro='\n The GPTBigCode Model transformer with a sequence classification head on top (linear layer).\n\n [`GPTBigCodeForSequenceClassification`] uses the last token in order to do the classification, as other causal\n models (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ')
class GPTBigCodeForSequenceClassification(GPTBigCodePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = GPTBigCodeModel(config)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, SequenceClassifierOutputWithPast]:
"""
input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size, sequence_length = input_ids.shape[:2]
else:
batch_size, sequence_length = inputs_embeds.shape[:2]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`')
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
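The `(token_indices * non_pad_mask).argmax(-1)` trick in the forward pass above deserves a worked example; this sketch uses a hypothetical `pad_token_id` of 0 and plain tensors:

```python
# Worked example of the last-non-pad-token selection from the forward pass above.
import torch

pad_token_id = 0  # hypothetical padding id for illustration
input_ids = torch.tensor([[5, 9, 4, 0, 0],
                          [7, 8, 0, 0, 0]])

non_pad_mask = (input_ids != pad_token_id).int()
token_indices = torch.arange(input_ids.shape[-1])
# Multiplying positions by the mask zeroes out padding; the argmax lands on
# the highest-index real token, i.e. each row's last non-pad position.
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
print(last_non_pad_token)  # tensor([2, 1])
```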
| null | 5 | 1 | 53 | 5 | 44 | 4 | 9 | 0.09 | 1 | 7 | 2 | 0 | 2 | 3 | 2 | 4 | 108 | 10 | 90 | 32 | 72 | 8 | 46 | 17 | 43 | 16 | 2 | 3 | 17 |

2,798 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | transformers.models.gpt_bigcode.modeling_gpt_bigcode.GPTBigCodeForTokenClassification |
from ...utils import auto_docstring, can_return_tuple, logging
from torch import nn
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast, TokenClassifierOutput
from typing import Callable, Optional, Union
@auto_docstring
class GPTBigCodeForTokenClassification(GPTBigCodePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = GPTBigCodeModel(config)
if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None:
classifier_dropout = config.classifier_dropout
elif hasattr(config, 'hidden_dropout') and config.hidden_dropout is not None:
classifier_dropout = config.hidden_dropout
else:
classifier_dropout = 0.1
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
"""
input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = transformer_outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.classifier(hidden_states)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1).to(logits.device))
if not return_dict:
output = (logits,) + transformer_outputs[2:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
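The loss computation above flattens token-level logits so `CrossEntropyLoss` sees one row per token; a toy sketch with hypothetical shapes:

```python
# Toy sketch of the logits/labels reshaping in the token-classification loss above.
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, num_labels = 2, 4, 3  # hypothetical sizes
logits = torch.randn(batch, seq_len, num_labels)
labels = torch.randint(0, num_labels, (batch, seq_len))

# (batch, seq_len, num_labels) -> (batch * seq_len, num_labels); labels flatten to match.
loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
print(loss)
```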
|
@auto_docstring
class GPTBigCodeForTokenClassification(GPTBigCodePreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
'''
input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5 | 1 | 36 | 4 | 29 | 4 | 4 | 0.12 | 1 | 5 | 2 | 0 | 2 | 4 | 2 | 4 | 75 | 8 | 60 | 29 | 42 | 7 | 26 | 14 | 23 | 5 | 2 | 1 | 8 |

2,799 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py | transformers.models.gpt_bigcode.modeling_gpt_bigcode.GPTBigCodeMLP |
from ...activations import ACT2FN
import torch
from torch import nn
from typing import Callable, Optional, Union
class GPTBigCodeMLP(nn.Module):
def __init__(self, intermediate_size, config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = nn.Linear(embed_dim, intermediate_size)
self.c_proj = nn.Linear(intermediate_size, embed_dim)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class GPTBigCodeMLP(nn.Module):
def __init__(self, intermediate_size, config):
pass
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
pass
| 3 | 0 | 7 | 0 | 7 | 0 | 1 | 0.07 | 1 | 1 | 0 | 0 | 2 | 4 | 2 | 12 | 16 | 1 | 14 | 8 | 11 | 1 | 14 | 8 | 11 | 1 | 1 | 0 | 2 |