Column schema (for string columns, min/max are character lengths; ⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length), nullable ⌀ | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
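The metric column names appear to follow SciTools Understand's static-metric naming (CountLineCode, AvgCyclomatic, CountClassCoupled, ...). A minimal inspection sketch using the Hugging Face `datasets` library; the dataset ID below is a placeholder, since this page does not name one:

```python
from datasets import load_dataset

# "org/code-class-metrics" is a placeholder ID, not taken from this page.
ds = load_dataset("org/code-class-metrics", split="train")
row = ds[5400]
print(row["class_name"])                            # fully qualified class name
print(row["CountLineCode"], row["AvgCyclomatic"])   # per-class static metrics
print(row["human_written_code"][:300])              # full class source; class_skeleton may be null
```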
id: 5,400
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/stablelm/modeling_stablelm.py
class_name: transformers.models.stablelm.modeling_stablelm.StableLmModel
human_written_code:
```python
from ...cache_utils import Cache, DynamicCache
from typing import Optional, Union
from ...modeling_attn_mask_utils import AttentionMaskConverter
from torch import nn
import torch
from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from .configuration_stablelm import StableLmConfig
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
@auto_docstring
class StableLmModel(StableLmPreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`StableLmDecoderLayer`]
Args:
config: StableLmConfig
"""
def __init__(self, config: StableLmConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([StableLmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.rotary_emb = StableLmRotaryEmbedding(config=config)
self._attn_implementation = config._attn_implementation
self.gradient_checkpointing = False
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> BaseModelOutputWithPast:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):
if self.config._attn_implementation == 'flash_attention_2':
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
if self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
return attention_mask
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions):
if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
if using_compilable_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions):
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
return causal_mask
```
class_skeleton:
```python
@auto_docstring
class StableLmModel(StableLmPreTrainedModel):
'''
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`StableLmDecoderLayer`]
Args:
config: StableLmConfig
'''
def __init__(self, config: StableLmConfig):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> BaseModelOutputWithPast:
pass
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):
pass
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
'''
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
'''
pass
```
metrics: total_program_units=9, total_doc_str=2, AvgCountLine=44, AvgCountLineBlank=5, AvgCountLineCode=33, AvgCountLineComment=6, AvgCyclomatic=7, CommentToCodeRatio=0.22, CountClassBase=1, CountClassCoupled=15, CountClassCoupledModified=8, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=8, CountDeclMethod=6, CountDeclMethodAll=7, CountLine=279, CountLineBlank=36, CountLineCode=201, CountLineCodeDecl=67, CountLineCodeExe=164, CountLineComment=44, CountStmt=103, CountStmtDecl=37, CountStmtExe=96, MaxCyclomatic=26, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=42
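The derived columns look internally consistent: across these rows CommentToCodeRatio matches CountLineComment / CountLineCode (44/201 ≈ 0.22 here, 14/40 = 0.35 for row 5,402). A quick check, assuming that definition:

```python
# Assumed definition: CommentToCodeRatio = CountLineComment / CountLineCode.
count_line_comment, count_line_code = 44, 201           # values from row 5,400
print(round(count_line_comment / count_line_code, 2))   # -> 0.22
```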
id: 5,401
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/stablelm/modeling_stablelm.py
class_name: transformers.models.stablelm.modeling_stablelm.StableLmPreTrainedModel
human_written_code:
```python
from .configuration_stablelm import StableLmConfig
from torch import nn
from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from ...modeling_utils import PreTrainedModel
@auto_docstring
class StableLmPreTrainedModel(PreTrainedModel):
config: StableLmConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['StableLmDecoderLayer']
_skip_keys_device_placement = 'past_key_values'
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
```
class_skeleton:
```python
@auto_docstring
class StableLmPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
```
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=10, AvgCountLineBlank=0, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=5, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=4, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=22, CountLineBlank=1, CountLineCode=21, CountLineCodeDecl=13, CountLineCodeExe=19, CountLineComment=0, CountStmt=20, CountStmtDecl=13, CountStmtExe=18, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=5
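Comparing human_written_code with class_skeleton across rows, the skeleton keeps imports, class headers, decorators, signatures, and docstrings while method bodies collapse to `pass`. A sketch of how such skeletons could be generated with Python's `ast` module (an assumed approach, not necessarily the dataset authors' tooling; docstring quoting may differ from the stored skeletons):

```python
import ast

def make_class_skeleton(source: str) -> str:
    """Collapse function bodies to `pass`, keeping decorators, signatures,
    and docstrings -- mirroring the class_skeleton column."""
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            new_body = []
            if ast.get_docstring(node) is not None:
                new_body.append(node.body[0])  # keep the docstring statement
            new_body.append(ast.Pass())
            node.body = new_body
    return ast.unparse(tree)  # requires Python 3.9+
```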
id: 5,402
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/stablelm/modeling_stablelm.py
class_name: transformers.models.stablelm.modeling_stablelm.StableLmRotaryEmbedding
human_written_code:
```python
from .configuration_stablelm import StableLmConfig
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from torch import nn
import torch
class StableLmRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: StableLmConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
```
class_skeleton:
```python
class StableLmRotaryEmbedding(nn.Module):
def __init__(self, config: StableLmConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
```
metrics: total_program_units=5, total_doc_str=0, AvgCountLine=18, AvgCountLineBlank=2, AvgCountLineCode=13, AvgCountLineComment=5, AvgCyclomatic=3, CommentToCodeRatio=0.35, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=7, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=59, CountLineBlank=8, CountLineCode=40, CountLineCodeDecl=21, CountLineCodeExe=35, CountLineComment=14, CountStmt=38, CountStmtDecl=20, CountStmtExe=34, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=8
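The rotary forward pass in this row is an outer product of inverse frequencies and positions, duplicated along the last axis. A shape walk-through with assumed toy sizes (batch=2, seq_len=5, rotary dim=8):

```python
import torch

bsz, seq_len, dim = 2, 5, 8
# Default RoPE inverse frequencies: one per pair of rotary channels.
inv_freq = 1.0 / (10000.0 ** (torch.arange(0, dim, 2).float() / dim))   # (4,)
position_ids = torch.arange(seq_len).unsqueeze(0).expand(bsz, -1)       # (2, 5)

inv_freq_expanded = inv_freq[None, :, None].float().expand(bsz, -1, 1)  # (2, 4, 1)
position_ids_expanded = position_ids[:, None, :].float()                # (2, 1, 5)
freqs = (inv_freq_expanded @ position_ids_expanded).transpose(1, 2)     # (2, 5, 4)
emb = torch.cat((freqs, freqs), dim=-1)                                 # (2, 5, 8)
cos, sin = emb.cos(), emb.sin()  # attention_scaling is 1.0 for the 'default' rope type
```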
id: 5,403
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/stablelm/modeling_stablelm.py
class_name: transformers.models.stablelm.modeling_stablelm.StableLmSdpaAttention
human_written_code:
```python
from ...cache_utils import Cache, DynamicCache
from typing import Optional, Union
import torch
from ...utils.deprecation import deprecate_kwarg
class StableLmSdpaAttention(StableLmAttention):
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
if output_attentions:
logger.warning_once('StableLmModel is using StableLmSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.')
return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings)
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
if self.qk_layernorm:
query_states = self.q_layernorm(query_states)
key_states = self.k_layernorm(key_states)
cos, sin = position_embeddings
query_rot, query_pass = (query_states[..., :self.rotary_ndims], query_states[..., self.rotary_ndims:])
key_rot, key_pass = (key_states[..., :self.rotary_ndims], key_states[..., self.rotary_ndims:])
query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin)
query_states = torch.cat((query_rot, query_pass), dim=-1)
key_states = torch.cat((key_rot, key_pass), dim=-1)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'partial_rotation_size': self.rotary_ndims, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, :key_states.shape[-2]]
if query_states.device.type == 'cuda' and attention_mask is not None:
query_states = query_states.contiguous()
key_states = key_states.contiguous()
value_states = value_states.contiguous()
is_causal = bool(causal_mask is None and q_len > 1)
attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout.p if self.training else 0.0, is_causal=is_causal)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, self.hidden_size)
attn_output = self.o_proj(attn_output)
return (attn_output, None)
```
class_skeleton:
```python
class StableLmSdpaAttention(StableLmAttention):
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
```
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=105, AvgCountLineBlank=16, AvgCountLineCode=78, AvgCountLineComment=13, AvgCyclomatic=8, CommentToCodeRatio=0.16, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=13, CountLine=106, CountLineBlank=16, CountLineCode=79, CountLineCodeDecl=23, CountLineCodeExe=67, CountLineComment=13, CountStmt=39, CountStmtDecl=13, CountStmtExe=37, MaxCyclomatic=8, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=8
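For reference, the fused kernel call at the heart of this row, with assumed toy shapes (batch=1, heads=2, q_len=4, head_dim=8). `is_causal=True` applies the lower-triangular mask internally, which is why the module only enables it when no explicit mask is given and `q_len > 1`:

```python
import torch
import torch.nn.functional as F

q = torch.randn(1, 2, 4, 8)  # (batch, num_heads, q_len, head_dim)
k = torch.randn(1, 2, 4, 8)
v = torch.randn(1, 2, 4, 8)

out = F.scaled_dot_product_attention(q, k, v, attn_mask=None,
                                     dropout_p=0.0, is_causal=True)
print(out.shape)  # torch.Size([1, 2, 4, 8])
```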
id: 5,404
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/configuration_starcoder2.py
class_name: transformers.models.starcoder2.configuration_starcoder2.Starcoder2Config
human_written_code:
```python
from ...modeling_rope_utils import rope_config_validation
from ...configuration_utils import PretrainedConfig
class Starcoder2Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Starcoder2Model`]. It is used to instantiate a
Starcoder2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [bigcode/starcoder2-7b](https://huggingface.co/bigcode/starcoder2-7b) model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49152):
Vocabulary size of the Starcoder2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Starcoder2Model`]
hidden_size (`int`, *optional*, defaults to 3072):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 12288):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 30):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 24):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 2):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. Starcoder2's sliding window attention
allows sequence of up to 4096*32 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
norm_epsilon (`float`, *optional*, defaults to 1e-05):
Epsilon value for the layer norm
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
bos_token_id (`int`, *optional*, defaults to 50256):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 50256):
The id of the "end-of-sequence" token.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
sliding_window (`int`, *optional*):
Sliding window attention window size. If not specified, will default to `None` (no sliding window).
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
residual_dropout (`float`, *optional*, defaults to 0.0):
Residual connection dropout value.
embedding_dropout (`float`, *optional*, defaults to 0.0):
Embedding dropout.
use_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias term on linear layers of the model.
```python
>>> from transformers import Starcoder2Model, Starcoder2Config
>>> # Initializing a Starcoder2 7B style configuration
>>> configuration = Starcoder2Config()
>>> # Initializing a model from the Starcoder2 7B style configuration
>>> model = Starcoder2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'starcoder2'
keys_to_ignore_at_inference = ['past_key_values']
base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.c_fc': 'colwise', 'layers.*.mlp.c_proj': 'rowwise'}
base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}
def __init__(self, vocab_size=49152, hidden_size=3072, intermediate_size=12288, num_hidden_layers=30, num_attention_heads=24, num_key_value_heads=2, hidden_act='gelu_pytorch_tanh', max_position_embeddings=4096, initializer_range=0.018042, norm_epsilon=1e-05, use_cache=True, bos_token_id=50256, eos_token_id=50256, rope_theta=10000.0, rope_scaling=None, sliding_window=None, attention_dropout=0.0, residual_dropout=0.0, embedding_dropout=0.0, use_bias=True, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.sliding_window = sliding_window
self.use_bias = use_bias
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.norm_epsilon = norm_epsilon
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.attention_dropout = attention_dropout
self.residual_dropout = residual_dropout
self.embedding_dropout = embedding_dropout
if self.rope_scaling is not None and 'type' in self.rope_scaling:
self.rope_scaling['rope_type'] = self.rope_scaling['type']
rope_config_validation(self)
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
```
class_skeleton:
```python
class Starcoder2Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Starcoder2Model`]. It is used to instantiate a
Starcoder2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [bigcode/starcoder2-7b](https://huggingface.co/bigcode/starcoder2-7b) model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49152):
Vocabulary size of the Starcoder2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Starcoder2Model`]
hidden_size (`int`, *optional*, defaults to 3072):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 12288):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 30):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 24):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 2):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. Starcoder2's sliding window attention
allows sequence of up to 4096*32 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
norm_epsilon (`float`, *optional*, defaults to 1e-05):
Epsilon value for the layer norm
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
bos_token_id (`int`, *optional*, defaults to 50256):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 50256):
The id of the "end-of-sequence" token.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
sliding_window (`int`, *optional*):
Sliding window attention window size. If not specified, will default to `None` (no sliding window).
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
residual_dropout (`float`, *optional*, defaults to 0.0):
Residual connection dropout value.
embedding_dropout (`float`, *optional*, defaults to 0.0):
Embedding dropout.
use_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias term on linear layers of the model.
```python
>>> from transformers import Starcoder2Model, Starcoder2Config
>>> # Initializing a Starcoder2 7B style configuration
>>> configuration = Starcoder2Config()
>>> # Initializing a model from the Starcoder2 7B style configuration
>>> model = Starcoder2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=49152, hidden_size=3072, intermediate_size=12288, num_hidden_layers=30, num_attention_heads=24, num_key_value_heads=2, hidden_act='gelu_pytorch_tanh', max_position_embeddings=4096, initializer_range=0.018042, norm_epsilon=1e-05, use_cache=True, bos_token_id=50256, eos_token_id=50256, rope_theta=10000.0, rope_scaling=None, sliding_window=None, attention_dropout=0.0, residual_dropout=0.0, embedding_dropout=0.0, use_bias=True, **kwargs):
pass
```
metrics: total_program_units=2, total_doc_str=1, AvgCountLine=53, AvgCountLineBlank=1, AvgCountLineCode=50, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=1.67, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=18, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=175, CountLineBlank=12, CountLineCode=61, CountLineCodeDecl=46, CountLineCodeExe=36, CountLineComment=102, CountStmt=27, CountStmtDecl=23, CountStmtExe=25, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=2
id: 5,405
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modeling_starcoder2.py
class_name: transformers.models.starcoder2.modeling_starcoder2.Starcoder2Attention
human_written_code:
```python
import torch
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
from .configuration_starcoder2 import Starcoder2Config
from ...cache_utils import Cache, DynamicCache
from ...utils.deprecation import deprecate_kwarg
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
class Starcoder2Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Starcoder2Config, layer_idx: Optional[int]=None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', None) or config.hidden_size // config.num_attention_heads
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
self.residual_dropout = config.residual_dropout
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=getattr(self.config, 'sliding_window', None), **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training)
return (attn_output, attn_weights)
```
class_skeleton:
```python
class Starcoder2Attention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: Starcoder2Config, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
```
metrics: total_program_units=4, total_doc_str=1, AvgCountLine=34, AvgCountLineBlank=4, AvgCountLineCode=30, AvgCountLineComment=2, AvgCyclomatic=3, CommentToCodeRatio=0.07, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=12, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=71, CountLineBlank=9, CountLineCode=60, CountLineCodeDecl=32, CountLineCodeExe=49, CountLineComment=4, CountStmt=36, CountStmtDecl=24, CountStmtExe=33, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=6
id: 5,406
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modeling_starcoder2.py
class_name: transformers.models.starcoder2.modeling_starcoder2.Starcoder2DecoderLayer
human_written_code:
```python
from torch import nn
import torch
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache
from typing import Callable, Optional, Union
from ...modeling_layers import GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...processing_utils import Unpack
from .configuration_starcoder2 import Starcoder2Config
class Starcoder2DecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Starcoder2Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Starcoder2Attention(config=config, layer_idx=layer_idx)
self.mlp = Starcoder2MLP(config)
self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
```
class_skeleton:
```python
class Starcoder2DecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Starcoder2Config, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
```
metrics: total_program_units=4, total_doc_str=0, AvgCountLine=24, AvgCountLineBlank=3, AvgCountLineCode=21, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.07, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=5, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=50, CountLineBlank=6, CountLineCode=42, CountLineCodeDecl=22, CountLineCodeExe=28, CountLineComment=3, CountStmt=21, CountStmtDecl=11, CountStmtExe=18, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
id: 5,407
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modeling_starcoder2.py
class_name: transformers.models.starcoder2.modeling_starcoder2.Starcoder2ForCausalLM
human_written_code:
```python
from typing import Callable, Optional, Union
import torch
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...cache_utils import Cache, DynamicCache
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...generation import GenerationMixin
from torch import nn
@auto_docstring
class Starcoder2ForCausalLM(Starcoder2PreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
_tp_plan = {'lm_head': 'colwise_rep'}
_pp_plan = {'lm_head': (['hidden_states'], ['logits'])}
def __init__(self, config):
super().__init__(config)
self.model = Starcoder2Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
"""
Example:
```python
>>> from transformers import AutoTokenizer, Starcoder2ForCausalLM
>>> model = Starcoder2ForCausalLM.from_pretrained("meta-starcoder2/Starcoder2-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-starcoder2/Starcoder2-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
```
class_skeleton:
```python
@auto_docstring
class Starcoder2ForCausalLM(Starcoder2PreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
'''
Example:
```python
>>> from transformers import AutoTokenizer, Starcoder2ForCausalLM
>>> model = Starcoder2ForCausalLM.from_pretrained("meta-starcoder2/Starcoder2-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-starcoder2/Starcoder2-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```'''
pass
```
metrics: total_program_units=6, total_doc_str=1, AvgCountLine=14, AvgCountLineBlank=2, AvgCountLineCode=9, AvgCountLineComment=4, AvgCyclomatic=2, CommentToCodeRatio=0.38, CountClassBase=2, CountClassCoupled=9, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=8, CountDeclInstanceVariable=3, CountDeclMethod=8, CountDeclMethodAll=9, CountLine=123, CountLineBlank=21, CountLineCode=74, CountLineCodeDecl=36, CountLineCodeExe=47, CountLineComment=28, CountStmt=36, CountStmtDecl=20, CountStmtExe=27, MaxCyclomatic=8, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=15
id: 5,408
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modeling_starcoder2.py
class_name: transformers.models.starcoder2.modeling_starcoder2.Starcoder2ForSequenceClassification
human_written_code:
```python
from ...modeling_layers import GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class Starcoder2ForSequenceClassification(GenericForSequenceClassification, Starcoder2PreTrainedModel):
pass
```
class_skeleton:
```python
class Starcoder2ForSequenceClassification(GenericForSequenceClassification, Starcoder2PreTrainedModel):
pass
```
metrics: total_program_units=1, total_doc_str=0, AvgCountLine=21, AvgCountLineBlank=2, AvgCountLineCode=17, AvgCountLineComment=2, AvgCyclomatic=3, CommentToCodeRatio=0.11, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=3, CountDeclMethod=4, CountDeclMethodAll=5, CountLine=90, CountLineBlank=11, CountLineCode=71, CountLineCodeDecl=31, CountLineCodeExe=53, CountLineComment=8, CountStmt=36, CountStmtDecl=18, CountStmtExe=31, MaxCyclomatic=9, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=12
id: 5,409
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modeling_starcoder2.py
class_name: transformers.models.starcoder2.modeling_starcoder2.Starcoder2ForTokenClassification
human_written_code:
```python
from ...modeling_layers import GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer
class Starcoder2ForTokenClassification(GenericForTokenClassification, Starcoder2PreTrainedModel):
pass
```
class_skeleton:
```python
class Starcoder2ForTokenClassification(GenericForTokenClassification, Starcoder2PreTrainedModel):
pass
```
metrics: total_program_units=1, total_doc_str=0, AvgCountLine=17, AvgCountLineBlank=1, AvgCountLineCode=14, AvgCountLineComment=2, AvgCyclomatic=3, CommentToCodeRatio=0.11, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=4, CountDeclMethod=4, CountDeclMethodAll=5, CountLine=79, CountLineBlank=8, CountLineCode=64, CountLineCodeDecl=28, CountLineCodeExe=41, CountLineComment=7, CountStmt=29, CountStmtDecl=15, CountStmtExe=24, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=10
id: 5,410
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modeling_starcoder2.py
class_name: transformers.models.starcoder2.modeling_starcoder2.Starcoder2MLP
human_written_code:
```python
import torch
from torch import nn
from typing import Callable, Optional, Union
from .configuration_starcoder2 import Starcoder2Config
from ...activations import ACT2FN
class Starcoder2MLP(nn.Module):
def __init__(self, config: Starcoder2Config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias)
self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias)
self.act = ACT2FN[config.hidden_act]
self.residual_dropout = config.residual_dropout
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training)
return hidden_states
```
class_skeleton:
```python
class Starcoder2MLP(nn.Module):
def __init__(self, config: Starcoder2Config):
pass
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
pass
```
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=7, AvgCountLineBlank=0, AvgCountLineCode=7, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=15, CountLineBlank=1, CountLineCode=14, CountLineCodeDecl=8, CountLineCodeExe=11, CountLineComment=0, CountStmt=14, CountStmtDecl=8, CountStmtExe=11, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 5,411
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modeling_starcoder2.py
class_name: transformers.models.starcoder2.modeling_starcoder2.Starcoder2Model
human_written_code:
```python
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
import torch
from torch import nn
from ...cache_utils import Cache, DynamicCache
from .configuration_starcoder2 import Starcoder2Config
from ...processing_utils import Unpack
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from transformers.utils.generic import check_model_inputs
@auto_docstring
class Starcoder2Model(Starcoder2PreTrainedModel):
def __init__(self, config: Starcoder2Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
self.rotary_emb = Starcoder2RotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.embedding_dropout = config.embedding_dropout
self.post_init()
@check_model_inputs
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
causal_mask = mask_function(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
hidden_states = nn.functional.dropout(hidden_states, p=self.embedding_dropout, training=self.training)
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None)
```
class_skeleton:
```python
@auto_docstring
class Starcoder2Model(Starcoder2PreTrainedModel):
def __init__(self, config: Starcoder2Config):
pass
@check_model_inputs
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
pass
```
metrics: total_program_units=5, total_doc_str=0, AvgCountLine=44, AvgCountLineBlank=4, AvgCountLineCode=33, AvgCountLineComment=7, AvgCyclomatic=7, CommentToCodeRatio=0.24, CountClassBase=1, CountClassCoupled=16, CountClassCoupledModified=10, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=9, CountDeclMethod=6, CountDeclMethodAll=7, CountLine=277, CountLineBlank=33, CountLineCode=199, CountLineCodeDecl=72, CountLineCodeExe=160, CountLineComment=47, CountStmt=99, CountStmtDecl=39, CountStmtExe=92, MaxCyclomatic=20, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=40
id: 5,412
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modeling_starcoder2.py
class_name: transformers.models.starcoder2.modeling_starcoder2.Starcoder2PreTrainedModel
human_written_code:
```python
from .configuration_starcoder2 import Starcoder2Config
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
@auto_docstring
class Starcoder2PreTrainedModel(PreTrainedModel):
config: Starcoder2Config
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['Starcoder2DecoderLayer']
_skip_keys_device_placement = ['past_key_values']
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': Starcoder2DecoderLayer, 'attentions': Starcoder2Attention}
```
class_skeleton:
```python
@auto_docstring
class Starcoder2PreTrainedModel(PreTrainedModel):
pass
```
metrics: total_program_units=2, total_doc_str=0, AvgCountLine=10, AvgCountLineBlank=0, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=5, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=4, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=24, CountLineBlank=1, CountLineCode=23, CountLineCodeDecl=15, CountLineCodeExe=21, CountLineComment=0, CountStmt=22, CountStmtDecl=15, CountStmtExe=20, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=5
id: 5,413
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modeling_starcoder2.py
class_name: transformers.models.starcoder2.modeling_starcoder2.Starcoder2RotaryEmbedding
human_written_code:
```python
import torch
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from .configuration_starcoder2 import Starcoder2Config
from torch import nn
class Starcoder2RotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: Starcoder2Config, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
```
class_skeleton:
```python
class Starcoder2RotaryEmbedding(nn.Module):
def __init__(self, config: Starcoder2Config, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
```
metrics: total_program_units=5, total_doc_str=0, AvgCountLine=18, AvgCountLineBlank=2, AvgCountLineCode=13, AvgCountLineComment=5, AvgCyclomatic=3, CommentToCodeRatio=0.35, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=7, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=59, CountLineBlank=8, CountLineCode=40, CountLineCodeDecl=21, CountLineCodeExe=35, CountLineComment=14, CountStmt=38, CountStmtDecl=20, CountStmtExe=34, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=8
id: 5,414
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modular_starcoder2.py
class_name: transformers.models.starcoder2.modular_starcoder2.Starcoder2Attention
human_written_code:
```python
from ...utils.deprecation import deprecate_kwarg
from torch import nn
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from typing import Callable, Optional, Union
from ...cache_utils import Cache, DynamicCache
from ...processing_utils import Unpack
from ..mistral.modeling_mistral import MistralAttention, MistralDecoderLayer, MistralForCausalLM, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from .configuration_starcoder2 import Starcoder2Config
class Starcoder2Attention(MistralAttention):
def __init__(self, config: Starcoder2Config, layer_idx: Optional[int]=None):
super().__init__(config=config, layer_idx=layer_idx)
self.residual_dropout = config.residual_dropout
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=getattr(self.config, 'sliding_window', None), **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training)
return (attn_output, attn_weights)
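# Hedged sketch of the eager attention math the forward above dispatches to:
# grouped-query attention, where each key/value head serves several query heads.
# All sizes are invented; repeat_interleave stands in for transformers' repeat_kv helper.
import torch
import torch.nn.functional as F

b, h_q, h_kv, s, d = 2, 8, 2, 5, 16
q = torch.randn(b, h_q, s, d)
k = torch.randn(b, h_kv, s, d)
v = torch.randn(b, h_kv, s, d)

k = k.repeat_interleave(h_q // h_kv, dim=1)  # (b, h_q, s, d)
v = v.repeat_interleave(h_q // h_kv, dim=1)

scores = (q @ k.transpose(-1, -2)) * d**-0.5                        # scaling = head_dim**-0.5
causal = torch.triu(torch.full((s, s), float("-inf")), diagonal=1)  # strictly-causal additive mask
weights = F.softmax(scores + causal, dim=-1)
out = (weights @ v).transpose(1, 2).reshape(b, s, h_q * d)          # what o_proj would then consume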
|
class Starcoder2Attention(MistralAttention):
def __init__(self, config: Starcoder2Config, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| 4
| 0
| 30
| 4
| 26
| 2
| 3
| 0.06
| 1
| 6
| 3
| 0
| 2
| 8
| 2
| 14
| 62
| 8
| 53
| 27
| 42
| 3
| 29
| 17
| 26
| 5
| 2
| 2
| 6
|
5,415
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modular_starcoder2.py
|
transformers.models.starcoder2.modular_starcoder2.Starcoder2DecoderLayer
|
from ..mistral.modeling_mistral import MistralAttention, MistralDecoderLayer, MistralForCausalLM, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward
from .configuration_starcoder2 import Starcoder2Config
from torch import nn
class Starcoder2DecoderLayer(MistralDecoderLayer):
def __init__(self, config: Starcoder2Config, layer_idx: int):
super().__init__(config, layer_idx)
self.self_attn = Starcoder2Attention(config=config, layer_idx=layer_idx)
self.mlp = Starcoder2MLP(config)
self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
|
class Starcoder2DecoderLayer(MistralDecoderLayer):
def __init__(self, config: Starcoder2Config, layer_idx: int):
pass
| 2
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 5
| 3
| 0
| 1
| 4
| 1
| 13
| 7
| 0
| 7
| 6
| 5
| 0
| 7
| 6
| 5
| 1
| 2
| 0
| 1
|
5,416
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modular_starcoder2.py
|
transformers.models.starcoder2.modular_starcoder2.Starcoder2ForCausalLM
|
from ..mistral.modeling_mistral import MistralAttention, MistralDecoderLayer, MistralForCausalLM, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward
class Starcoder2ForCausalLM(MistralForCausalLM):
pass
|
class Starcoder2ForCausalLM(MistralForCausalLM):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
5,417
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modular_starcoder2.py
|
transformers.models.starcoder2.modular_starcoder2.Starcoder2ForSequenceClassification
|
from ..mistral.modeling_mistral import MistralAttention, MistralDecoderLayer, MistralForCausalLM, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward
class Starcoder2ForSequenceClassification(MistralForSequenceClassification):
pass
|
class Starcoder2ForSequenceClassification(MistralForSequenceClassification):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
5,418
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modular_starcoder2.py
|
transformers.models.starcoder2.modular_starcoder2.Starcoder2ForTokenClassification
|
from ..mistral.modeling_mistral import MistralAttention, MistralDecoderLayer, MistralForCausalLM, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward
class Starcoder2ForTokenClassification(MistralForTokenClassification):
pass
|
class Starcoder2ForTokenClassification(MistralForTokenClassification):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 3
| 0
| 0
|
5,419
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modular_starcoder2.py
|
transformers.models.starcoder2.modular_starcoder2.Starcoder2MLP
|
from .configuration_starcoder2 import Starcoder2Config
from ...activations import ACT2FN
from torch import nn
from typing import Callable, Optional, Union
import torch
class Starcoder2MLP(nn.Module):
def __init__(self, config: Starcoder2Config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias)
self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias)
self.act = ACT2FN[config.hidden_act]
self.residual_dropout = config.residual_dropout
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training)
return hidden_states
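# Minimal usage sketch: SimpleNamespace stands in for Starcoder2Config and the
# field values are illustrative; assumes the Starcoder2MLP class above is in scope.
import torch
from types import SimpleNamespace

cfg = SimpleNamespace(hidden_size=64, intermediate_size=256,
                      hidden_act="gelu_pytorch_tanh", use_bias=True,
                      residual_dropout=0.1)
mlp = Starcoder2MLP(cfg)
out = mlp(torch.randn(2, 7, cfg.hidden_size))
print(out.shape)  # torch.Size([2, 7, 64])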
|
class Starcoder2MLP(nn.Module):
def __init__(self, config: Starcoder2Config):
pass
def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 2
| 1
| 0
| 2
| 4
| 2
| 12
| 15
| 1
| 14
| 8
| 11
| 0
| 14
| 8
| 11
| 1
| 1
| 0
| 2
|
5,420
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/starcoder2/modular_starcoder2.py
|
transformers.models.starcoder2.modular_starcoder2.Starcoder2Model
|
from ...processing_utils import Unpack
import torch
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutputWithPast
from ...utils import TransformersKwargs, logging
from ...cache_utils import Cache, DynamicCache
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ..mistral.modeling_mistral import MistralAttention, MistralDecoderLayer, MistralForCausalLM, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward
from .configuration_starcoder2 import Starcoder2Config
from torch import nn
from transformers.utils.generic import check_model_inputs
class Starcoder2Model(MistralModel):
def __init__(self, config: Starcoder2Config):
super().__init__(config)
self.layers = nn.ModuleList([Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
self.embedding_dropout = config.embedding_dropout
@check_model_inputs
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
causal_mask = mask_function(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
hidden_states = nn.functional.dropout(hidden_states, p=self.embedding_dropout, training=self.training)
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None)
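# Pure-torch illustration of the cache_position bookkeeping in the forward above:
# positions start at 0 on prefill and resume from the cache length on each decode step.
import torch

def next_cache_position(past_seen_tokens: int, new_tokens: int) -> torch.Tensor:
    return torch.arange(past_seen_tokens, past_seen_tokens + new_tokens)

print(next_cache_position(0, 4))  # tensor([0, 1, 2, 3])  first (prefill) call
print(next_cache_position(4, 1))  # tensor([4])           one cached decode step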
|
class Starcoder2Model(MistralModel):
def __init__(self, config: Starcoder2Config):
pass
@check_model_inputs
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
pass
| 4
| 0
| 51
| 9
| 41
| 2
| 11
| 0.05
| 1
| 11
| 6
| 0
| 2
| 3
| 2
| 9
| 105
| 18
| 84
| 29
| 67
| 4
| 43
| 15
| 40
| 20
| 3
| 2
| 21
|
5,421
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/configuration_superglue.py
|
transformers.models.superglue.configuration_superglue.SuperGlueConfig
|
from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING
from typing import TYPE_CHECKING, Optional
class SuperGlueConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`SuperGlueModel`]. It is used to instantiate a
SuperGlue model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SuperGlue
[magic-leap-community/superglue_indoor](https://huggingface.co/magic-leap-community/superglue_indoor) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
keypoint_detector_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SuperPointConfig`):
The config object or dictionary of the keypoint detector.
hidden_size (`int`, *optional*, defaults to 256):
The dimension of the descriptors.
keypoint_encoder_sizes (`list[int]`, *optional*, defaults to `[32, 64, 128, 256]`):
The sizes of the keypoint encoder layers.
gnn_layers_types (`list[str]`, *optional*, defaults to `['self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross']`):
The types of the GNN layers. Must be either 'self' or 'cross'.
num_attention_heads (`int`, *optional*, defaults to 4):
The number of heads in the GNN layers.
sinkhorn_iterations (`int`, *optional*, defaults to 100):
The number of Sinkhorn iterations.
matching_threshold (`float`, *optional*, defaults to 0.0):
The matching threshold.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Examples:
```python
>>> from transformers import SuperGlueConfig, SuperGlueModel
>>> # Initializing a SuperGlue superglue style configuration
>>> configuration = SuperGlueConfig()
>>> # Initializing a model from the superglue style configuration
>>> model = SuperGlueModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'superglue'
def __init__(self, keypoint_detector_config: 'SuperPointConfig'=None, hidden_size: int=256, keypoint_encoder_sizes: Optional[list[int]]=None, gnn_layers_types: Optional[list[str]]=None, num_attention_heads: int=4, sinkhorn_iterations: int=100, matching_threshold: float=0.0, initializer_range: float=0.02, **kwargs):
self.gnn_layers_types = gnn_layers_types if gnn_layers_types is not None else ['self', 'cross'] * 9
if not all((layer_type in ['self', 'cross'] for layer_type in self.gnn_layers_types)):
raise ValueError("All gnn_layers_types must be either 'self' or 'cross'")
if hidden_size % num_attention_heads != 0:
raise ValueError('hidden_size must be divisible by num_attention_heads')
self.keypoint_encoder_sizes = keypoint_encoder_sizes if keypoint_encoder_sizes is not None else [32, 64, 128, 256]
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.sinkhorn_iterations = sinkhorn_iterations
self.matching_threshold = matching_threshold
if isinstance(keypoint_detector_config, dict):
keypoint_detector_config['model_type'] = keypoint_detector_config.get('model_type', 'superpoint')
keypoint_detector_config = CONFIG_MAPPING[keypoint_detector_config['model_type']](**keypoint_detector_config)
if keypoint_detector_config is None:
keypoint_detector_config = CONFIG_MAPPING['superpoint']()
self.keypoint_detector_config = keypoint_detector_config
self.initializer_range = initializer_range
self.attention_probs_dropout_prob = 0
self.is_decoder = False
super().__init__(**kwargs)
@property
def sub_configs(self):
return {'keypoint_detector_config': type(self.keypoint_detector_config)}
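# Hedged usage sketch of the validation rules above, assuming a transformers
# release that ships SuperGlue: gnn_layers_types entries must be 'self' or
# 'cross', and hidden_size must divide evenly by num_attention_heads.
from transformers import SuperGlueConfig

config = SuperGlueConfig(gnn_layers_types=["self", "cross"] * 3,
                         hidden_size=256, num_attention_heads=4)
print(config.sub_configs)  # {'keypoint_detector_config': <SuperPoint config class>}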
|
class SuperGlueConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`SuperGlueModel`]. It is used to instantiate a
SuperGlue model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SuperGlue
[magic-leap-community/superglue_indoor](https://huggingface.co/magic-leap-community/superglue_indoor) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
keypoint_detector_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SuperPointConfig`):
The config object or dictionary of the keypoint detector.
hidden_size (`int`, *optional*, defaults to 256):
The dimension of the descriptors.
keypoint_encoder_sizes (`list[int]`, *optional*, defaults to `[32, 64, 128, 256]`):
The sizes of the keypoint encoder layers.
gnn_layers_types (`list[str]`, *optional*, defaults to `['self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross', 'self', 'cross']`):
The types of the GNN layers. Must be either 'self' or 'cross'.
num_attention_heads (`int`, *optional*, defaults to 4):
The number of heads in the GNN layers.
sinkhorn_iterations (`int`, *optional*, defaults to 100):
The number of Sinkhorn iterations.
matching_threshold (`float`, *optional*, defaults to 0.0):
The matching threshold.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Examples:
```python
>>> from transformers import SuperGlueConfig, SuperGlueModel
>>> # Initializing a SuperGlue superglue style configuration
>>> configuration = SuperGlueConfig()
>>> # Initializing a model from the superglue style configuration
>>> model = SuperGlueModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, keypoint_detector_config: 'SuperPointConfig'=None, hidden_size: int=256, keypoint_encoder_sizes: Optional[list[int]]=None, gnn_layers_types: Optional[list[str]]=None, num_attention_heads: int=4, sinkhorn_iterations: int=100, matching_threshold: float=0.0, initializer_range: float=0.02, **kwargs):
pass
@property
def sub_configs(self):
pass
| 4
| 1
| 46
| 5
| 40
| 1
| 8
| 0.86
| 1
| 6
| 0
| 0
| 1
| 10
| 1
| 1
| 91
| 13
| 42
| 24
| 29
| 36
| 25
| 13
| 23
| 8
| 1
| 1
| 8
|
5,422
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/image_processing_superglue.py
|
transformers.models.superglue.image_processing_superglue.SuperGlueImageProcessor
|
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, ImageType, PILImageResampling, get_image_type, infer_channel_dimension_format, is_pil_image, is_scaled_image, is_torch_available, is_valid_image, is_vision_available, to_numpy_array, valid_images, validate_preprocess_arguments
from typing import TYPE_CHECKING, Optional, Union
from ...utils.import_utils import requires
import numpy as np
from ...utils import TensorType, logging, requires_backends
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
@requires(backends=('torch',))
class SuperGlueImageProcessor(BaseImageProcessor):
"""
Constructs a SuperGlue image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
by `do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 480, "width": 640}`):
Resolution of the output image after `resize` is applied. Only has an effect if `do_resize` is set to
`True`. Can be overridden by `size` in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_grayscale (`bool`, *optional*, defaults to `True`):
Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_grayscale: bool=True, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 480, 'width': 640}
size = get_size_dict(size, default_to_square=False)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_grayscale = do_grayscale
def resize(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs):
"""
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be inferred from the input
image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
size = get_size_dict(size, default_to_square=False)
return resize(image, size=(size['height'], size['width']), data_format=data_format, input_data_format=input_data_format, **kwargs)
def preprocess(self, images, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_grayscale: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image pairs to preprocess. Expects either a list of 2 images or a list of lists of 2 images, with
pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set
`do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the output image after `resize` has been applied, as a dictionary of the form
`{"height": int, "width": int}`. Only has an effect if `do_resize` is set to `True`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` filters.
Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values to the range [0, 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_grayscale (`bool`, *optional*, defaults to `self.do_grayscale`):
Whether to convert the image to grayscale.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_grayscale = do_grayscale if do_grayscale is not None else self.do_grayscale
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
images = validate_and_format_image_pairs(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor)
images = [to_numpy_array(image) for image in images]
if is_scaled_image(images[0]) and do_rescale:
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_grayscale:
image = convert_to_grayscale(image, input_data_format=input_data_format)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
all_images.append(image)
image_pairs = [all_images[i:i + 2] for i in range(0, len(all_images), 2)]
data = {'pixel_values': image_pairs}
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_keypoint_matching(self, outputs: 'KeypointMatchingOutput', target_sizes: Union[TensorType, list[tuple]], threshold: float=0.0) -> list[dict[str, torch.Tensor]]:
"""
Converts the raw output of [`KeypointMatchingOutput`] into lists of keypoints, scores and descriptors
with coordinates absolute to the original image sizes.
Args:
outputs ([`KeypointMatchingOutput`]):
Raw outputs of the model.
target_sizes (`torch.Tensor` or `list[tuple[tuple[int, int]]]`, *optional*):
Tensor of shape `(batch_size, 2, 2)` or list of tuples of tuples (`tuple[int, int]`) containing the
target size `(height, width)` of each image in the batch. This must be the original image size (before
any processing).
threshold (`float`, *optional*, defaults to 0.0):
Threshold to filter out the matches with low scores.
Returns:
`list[dict]`: A list of dictionaries, each containing the keypoints in the first and second image
of the pair, the matching scores and the matching indices.
"""
if outputs.mask.shape[0] != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the mask')
if not all((len(target_size) == 2 for target_size in target_sizes)):
raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')
if isinstance(target_sizes, list):
image_pair_sizes = torch.tensor(target_sizes, device=outputs.mask.device)
else:
if target_sizes.shape[1] != 2 or target_sizes.shape[2] != 2:
raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')
image_pair_sizes = target_sizes
keypoints = outputs.keypoints.clone()
keypoints = keypoints * image_pair_sizes.flip(-1).reshape(-1, 2, 1, 2)
keypoints = keypoints.to(torch.int32)
results = []
for mask_pair, keypoints_pair, matches, scores in zip(outputs.mask, keypoints, outputs.matches[:, 0], outputs.matching_scores[:, 0]):
mask0 = mask_pair[0] > 0
mask1 = mask_pair[1] > 0
keypoints0 = keypoints_pair[0][mask0]
keypoints1 = keypoints_pair[1][mask1]
matches0 = matches[mask0]
scores0 = scores[mask0]
valid_matches = torch.logical_and(scores0 > threshold, matches0 > -1)
matched_keypoints0 = keypoints0[valid_matches]
matched_keypoints1 = keypoints1[matches0[valid_matches]]
matching_scores = scores0[valid_matches]
results.append({'keypoints0': matched_keypoints0, 'keypoints1': matched_keypoints1, 'matching_scores': matching_scores})
return results
def visualize_keypoint_matching(self, images: ImageInput, keypoint_matching_output: list[dict[str, torch.Tensor]]) -> list['Image.Image']:
"""
Plots the image pairs side by side with the detected keypoints as well as the matching between them.
Args:
images (`ImageInput`):
Image pairs to plot. Same as `SuperGlueImageProcessor.preprocess`. Expects either a list of 2
images or a list of lists of 2 images, with pixel values ranging from 0 to 255.
keypoint_matching_output (`list[dict[str, torch.Tensor]]`):
A post-processed keypoint matching output, as returned by `post_process_keypoint_matching`.
Returns:
`list[PIL.Image.Image]`: A list of PIL images, each containing one image pair side by side with the
detected keypoints as well as the matching between them.
"""
images = validate_and_format_image_pairs(images)
images = [to_numpy_array(image) for image in images]
image_pairs = [images[i:i + 2] for i in range(0, len(images), 2)]
results = []
for image_pair, pair_output in zip(image_pairs, keypoint_matching_output):
height0, width0 = image_pair[0].shape[:2]
height1, width1 = image_pair[1].shape[:2]
plot_image = np.zeros((max(height0, height1), width0 + width1, 3), dtype=np.uint8)
plot_image[:height0, :width0] = image_pair[0]
plot_image[:height1, width0:] = image_pair[1]
plot_image_pil = Image.fromarray(plot_image)
draw = ImageDraw.Draw(plot_image_pil)
keypoints0_x, keypoints0_y = pair_output['keypoints0'].unbind(1)
keypoints1_x, keypoints1_y = pair_output['keypoints1'].unbind(1)
for keypoint0_x, keypoint0_y, keypoint1_x, keypoint1_y, matching_score in zip(keypoints0_x, keypoints0_y, keypoints1_x, keypoints1_y, pair_output['matching_scores']):
color = self._get_color(matching_score)
draw.line((keypoint0_x, keypoint0_y, keypoint1_x + width0, keypoint1_y), fill=color, width=3)
draw.ellipse((keypoint0_x - 2, keypoint0_y - 2, keypoint0_x + 2, keypoint0_y + 2), fill='black')
draw.ellipse((keypoint1_x + width0 - 2, keypoint1_y - 2, keypoint1_x + width0 + 2, keypoint1_y + 2), fill='black')
results.append(plot_image_pil)
return results
def _get_color(self, score):
"""Maps a score to a color."""
r = int(255 * (1 - score))
g = int(255 * score)
b = 0
return (r, g, b)
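# Hedged end-to-end usage sketch; the checkpoint name comes from the SuperGlue
# docs above, and the two image paths are placeholders you would supply yourself.
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("magic-leap-community/superglue_indoor")
image0 = Image.open("view_a.jpg")  # placeholder path
image1 = Image.open("view_b.jpg")  # placeholder path
inputs = processor([image0, image1], return_tensors="pt")
print(inputs["pixel_values"].shape)  # (batch, 2, channels, 480, 640) with the default size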
|
@requires(backends=('torch',))
class SuperGlueImageProcessor(BaseImageProcessor):
'''
Constructs a SuperGlue image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
by `do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 480, "width": 640}`):
Resolution of the output image after `resize` is applied. Only has an effect if `do_resize` is set to
`True`. Can be overridden by `size` in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_grayscale (`bool`, *optional*, defaults to `True`):
Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_grayscale: bool=True, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs):
'''
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be inferred from the input
image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def preprocess(self, images, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_grayscale: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> BatchFeature:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image pairs to preprocess. Expects either a list of 2 images or a list of lists of 2 images, with
pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set
`do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the output image after `resize` has been applied, as a dictionary of the form
`{"height": int, "width": int}`. Only has an effect if `do_resize` is set to `True`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` filters.
Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values to the range [0, 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_grayscale (`bool`, *optional*, defaults to `self.do_grayscale`):
Whether to convert the image to grayscale.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def post_process_keypoint_matching(self, outputs: 'KeypointMatchingOutput', target_sizes: Union[TensorType, list[tuple]], threshold: float=0.0) -> list[dict[str, torch.Tensor]]:
'''
Converts the raw output of [`KeypointMatchingOutput`] into lists of keypoints, scores and descriptors
with coordinates absolute to the original image sizes.
Args:
outputs ([`KeypointMatchingOutput`]):
Raw outputs of the model.
target_sizes (`torch.Tensor` or `list[tuple[tuple[int, int]]]`, *optional*):
Tensor of shape `(batch_size, 2, 2)` or list of tuples of tuples (`tuple[int, int]`) containing the
target size `(height, width)` of each image in the batch. This must be the original image size (before
any processing).
threshold (`float`, *optional*, defaults to 0.0):
Threshold to filter out the matches with low scores.
Returns:
`list[dict]`: A list of dictionaries, each containing the keypoints in the first and second image
of the pair, the matching scores and the matching indices.
'''
pass
def visualize_keypoint_matching(self, images: ImageInput, keypoint_matching_output: list[dict[str, torch.Tensor]]) -> list['Image.Image']:
'''
Plots the image pairs side by side with the detected keypoints as well as the matching between them.
Args:
images (`ImageInput`):
Image pairs to plot. Same as `SuperGlueImageProcessor.preprocess`. Expects either a list of 2
images or a list of lists of 2 images, with pixel values ranging from 0 to 255.
keypoint_matching_output (`list[dict[str, torch.Tensor]]`):
A post-processed keypoint matching output, as returned by `post_process_keypoint_matching`.
Returns:
`list[PIL.Image.Image]`: A list of PIL images, each containing one image pair side by side with the
detected keypoints as well as the matching between them.
'''
pass
def _get_color(self, score):
'''Maps a score to a color.'''
pass
| 8
| 6
| 61
| 7
| 33
| 21
| 6
| 0.77
| 1
| 11
| 2
| 0
| 4
| 6
| 4
| 24
| 271
| 32
| 135
| 63
| 96
| 104
| 72
| 29
| 67
| 14
| 3
| 2
| 23
|
5,423
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/modeling_superglue.py
|
transformers.models.superglue.modeling_superglue.KeypointMatchingOutput
|
from dataclasses import dataclass
from typing import Optional, Union
import torch
from ...utils import ModelOutput, auto_docstring, logging
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of keypoint matching models. Due to the nature of keypoint detection and matching, the number\n of keypoints is not fixed and can vary from image to image, which makes batching non-trivial. In the batch of\n images, the maximum number of matches is set as the dimension of the matches and matching scores. The mask tensor is\n used to indicate which values in the keypoints, matches and matching_scores tensors are keypoint matching\n information.\n ')
class KeypointMatchingOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*):
Loss computed during training.
matches (`torch.FloatTensor` of shape `(batch_size, 2, num_matches)`):
Index of keypoint matched in the other image.
matching_scores (`torch.FloatTensor` of shape `(batch_size, 2, num_matches)`):
Scores of predicted matches.
keypoints (`torch.FloatTensor` of shape `(batch_size, num_keypoints, 2)`):
Absolute (x, y) coordinates of predicted keypoints in a given image.
mask (`torch.IntTensor` of shape `(batch_size, num_keypoints)`):
Mask indicating which values in matches and matching_scores are keypoint matching information.
hidden_states (`tuple[torch.FloatTensor, ...]`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, 2, num_channels,
num_keypoints)`, returned when `output_hidden_states=True` is passed or when
`config.output_hidden_states=True`
attentions (`tuple[torch.FloatTensor, ...]`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, 2, num_heads, num_keypoints,
num_keypoints)`, returned when `output_attentions=True` is passed or when `config.output_attentions=True`
"""
loss: Optional[torch.FloatTensor] = None
matches: Optional[torch.FloatTensor] = None
matching_scores: Optional[torch.FloatTensor] = None
keypoints: Optional[torch.FloatTensor] = None
mask: Optional[torch.IntTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
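# Small sketch of how the mask and matches fields are consumed together
# (shapes invented): keep real keypoints where mask == 1, then drop the
# unmatched entries flagged with -1.
import torch

matches = torch.tensor([[2, -1, 0, -1]])  # (batch=1, num_keypoints=4)
mask = torch.tensor([[1, 1, 1, 0]])       # last slot is padding
valid = (mask[0] > 0) & (matches[0] > -1)
print(matches[0][valid])                  # tensor([2, 0])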
|
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of keypoint matching models. Due to the nature of keypoint detection and matching, the number\n of keypoints is not fixed and can vary from image to image, which makes batching non-trivial. In the batch of\n images, the maximum number of matches is set as the dimension of the matches and matching scores. The mask tensor is\n used to indicate which values in the keypoints, matches and matching_scores tensors are keypoint matching\n information.\n ')
class KeypointMatchingOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*):
Loss computed during training.
matches (`torch.FloatTensor` of shape `(batch_size, 2, num_matches)`):
Index of keypoint matched in the other image.
matching_scores (`torch.FloatTensor` of shape `(batch_size, 2, num_matches)`):
Scores of predicted matches.
keypoints (`torch.FloatTensor` of shape `(batch_size, num_keypoints, 2)`):
Absolute (x, y) coordinates of predicted keypoints in a given image.
mask (`torch.IntTensor` of shape `(batch_size, num_keypoints)`):
Mask indicating which values in matches and matching_scores are keypoint matching information.
hidden_states (`tuple[torch.FloatTensor, ...]`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, 2, num_channels,
num_keypoints)`, returned when `output_hidden_states=True` is passed or when
`config.output_hidden_states=True`
attentions (`tuple[torch.FloatTensor, ...]`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, 2, num_heads, num_keypoints,
num_keypoints)`, returned when `output_attentions=True` is passed or when `config.output_attentions=True`
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.13
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 35
| 2
| 8
| 8
| 7
| 25
| 8
| 8
| 7
| 0
| 1
| 0
| 0
|
5,424
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/modeling_superglue.py
|
transformers.models.superglue.modeling_superglue.SuperGlueAttention
|
from torch import nn
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from typing import Optional, Union
import torch
class SuperGlueAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
self.self = SUPERGLUE_SELF_ATTENTION_CLASSES[config._attn_implementation](config, position_embedding_type=position_embedding_type)
self.output = SuperGlueSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
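# Quick sketch of what prune_linear_layer (imported at the top of this file)
# does to a single layer; the kept indices are invented and amount to dropping
# one 4-dimensional "head" worth of output features.
import torch
from torch import nn
from transformers.pytorch_utils import prune_linear_layer

layer = nn.Linear(8, 12)
keep = torch.tensor([0, 1, 2, 3, 8, 9, 10, 11])  # drop output rows 4..7
pruned = prune_linear_layer(layer, keep)         # dim=0 prunes output features
print(pruned.weight.shape)                       # torch.Size([8, 8])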
|
class SuperGlueAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 15
| 1
| 14
| 1
| 1
| 0.07
| 1
| 5
| 1
| 0
| 3
| 3
| 3
| 13
| 49
| 4
| 43
| 20
| 30
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
5,425
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/modeling_superglue.py
|
transformers.models.superglue.modeling_superglue.SuperGlueAttentionalGNN
|
from transformers.models.superglue.configuration_superglue import SuperGlueConfig
from torch import nn
from typing import Optional, Union
import torch
class SuperGlueAttentionalGNN(nn.Module):
def __init__(self, config: SuperGlueConfig) -> None:
super().__init__()
self.hidden_size = config.hidden_size
self.layers_types = config.gnn_layers_types
self.layers = nn.ModuleList([SuperGlueAttentionalPropagation(config) for _ in range(len(self.layers_types))])
def forward(self, descriptors: torch.Tensor, mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: Optional[bool]=False) -> tuple[torch.Tensor, Optional[tuple], Optional[tuple]]:
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
batch_size, num_keypoints, _ = descriptors.shape
if output_hidden_states:
all_hidden_states = all_hidden_states + (descriptors,)
for gnn_layer, layer_type in zip(self.layers, self.layers_types):
encoder_hidden_states = None
encoder_attention_mask = None
if layer_type == 'cross':
encoder_hidden_states = descriptors.reshape(-1, 2, num_keypoints, self.hidden_size).flip(1).reshape(batch_size, num_keypoints, self.hidden_size)
encoder_attention_mask = mask.reshape(-1, 2, 1, 1, num_keypoints).flip(1).reshape(batch_size, 1, 1, num_keypoints) if mask is not None else None
gnn_outputs = gnn_layer(descriptors, attention_mask=mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions)
delta = gnn_outputs[0]
if output_hidden_states:
all_hidden_states = all_hidden_states + gnn_outputs[1]
if output_attentions:
all_attentions = all_attentions + gnn_outputs[2]
descriptors = descriptors + delta
return (descriptors, all_hidden_states, all_attentions)
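# Tiny demo of the flip(1) trick above: with the two images of each pair
# interleaved along the batch axis, flipping the pair axis swaps every image's
# descriptors with its partner's, which is exactly what the 'cross' layers attend to.
import torch

num_keypoints, dim = 3, 1
desc = torch.stack([torch.full((num_keypoints, dim), float(i)) for i in range(4)])  # two pairs
swapped = desc.reshape(-1, 2, num_keypoints, dim).flip(1).reshape(-1, num_keypoints, dim)
print(desc[:, 0, 0].tolist())     # [0.0, 1.0, 2.0, 3.0]
print(swapped[:, 0, 0].tolist())  # [1.0, 0.0, 3.0, 2.0]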
|
class SuperGlueAttentionalGNN(nn.Module):
def __init__(self, config: SuperGlueConfig) -> None:
pass
def forward(self, descriptors: torch.Tensor, mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: Optional[bool]=False) -> tuple[torch.Tensor, Optional[tuple], Optional[tuple]]:
pass
| 3
| 0
| 26
| 3
| 23
| 0
| 5
| 0
| 1
| 7
| 2
| 0
| 2
| 3
| 2
| 12
| 53
| 6
| 47
| 20
| 38
| 0
| 26
| 14
| 23
| 9
| 1
| 2
| 10
|
5,426
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/modeling_superglue.py
|
transformers.models.superglue.modeling_superglue.SuperGlueAttentionalPropagation
|
import torch
from torch import nn
from transformers.models.superglue.configuration_superglue import SuperGlueConfig
from typing import Optional, Union
class SuperGlueAttentionalPropagation(nn.Module):
def __init__(self, config: SuperGlueConfig) -> None:
super().__init__()
hidden_size = config.hidden_size
self.attention = SuperGlueAttention(config)
mlp_channels = [hidden_size * 2, hidden_size * 2, hidden_size]
layers = [SuperGlueMultiLayerPerceptron(config, mlp_channels[i - 1], mlp_channels[i]) for i in range(1, len(mlp_channels) - 1)]
layers.append(nn.Linear(mlp_channels[-2], mlp_channels[-1]))
self.mlp = nn.ModuleList(layers)
def forward(self, descriptors: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False) -> tuple[torch.Tensor, Optional[tuple[torch.Tensor]], Optional[tuple[torch.Tensor]]]:
attention_outputs = self.attention(descriptors, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
output = attention_outputs[0]
attention = attention_outputs[1:]
hidden_state = torch.cat([descriptors, output], dim=2)
all_hidden_states = () if output_hidden_states else None
for layer in self.mlp:
hidden_state = layer(hidden_state)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
return (hidden_state, all_hidden_states, attention)
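# Quick shape check for the concatenation above: the MLP's first layer consumes
# the concat of descriptors and the attention output, i.e. 2 * hidden_size
# channels, matching the first entry of mlp_channels.
import torch

hidden_size, num_keypoints = 4, 5
descriptors = torch.randn(1, num_keypoints, hidden_size)
attn_out = torch.randn(1, num_keypoints, hidden_size)
print(torch.cat([descriptors, attn_out], dim=2).shape)  # torch.Size([1, 5, 8])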
|
class SuperGlueAttentionalPropagation(nn.Module):
def __init__(self, config: SuperGlueConfig) -> None:
pass
def forward(self, descriptors: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False) -> tuple[torch.Tensor, Optional[tuple[torch.Tensor]], Optional[tuple[torch.Tensor]]]:
pass
| 3
| 0
| 20
| 2
| 18
| 0
| 3
| 0
| 1
| 7
| 3
| 0
| 2
| 2
| 2
| 12
| 41
| 4
| 37
| 22
| 26
| 0
| 20
| 14
| 17
| 4
| 1
| 2
| 5
|
5,427
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/modeling_superglue.py
|
transformers.models.superglue.modeling_superglue.SuperGlueFinalProjection
|
import torch
from transformers.models.superglue.configuration_superglue import SuperGlueConfig
from torch import nn
class SuperGlueFinalProjection(nn.Module):
def __init__(self, config: SuperGlueConfig) -> None:
super().__init__()
hidden_size = config.hidden_size
self.final_proj = nn.Linear(hidden_size, hidden_size, bias=True)
def forward(self, descriptors: torch.Tensor) -> torch.Tensor:
return self.final_proj(descriptors)
|
class SuperGlueFinalProjection(nn.Module):
def __init__(self, config: SuperGlueConfig) -> None:
pass
def forward(self, descriptors: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 5
| 4
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
5,428
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/modeling_superglue.py
|
transformers.models.superglue.modeling_superglue.SuperGlueForKeypointMatching
|
from typing import Optional, Union
from transformers.models.superglue.configuration_superglue import SuperGlueConfig
from ...utils import ModelOutput, auto_docstring, logging
from ..auto import AutoModelForKeypointDetection
import torch
@auto_docstring(custom_intro='\n SuperGlue model taking images as inputs and outputting the matching of them.\n ')
class SuperGlueForKeypointMatching(SuperGluePreTrainedModel):
"""SuperGlue feature matching middle-end
Given two sets of keypoints and locations, we determine the
correspondences by:
1. Keypoint Encoding (normalization + visual feature and location fusion)
2. Graph Neural Network with multiple self and cross-attention layers
3. Final projection layer
4. Optimal Transport Layer (a differentiable Hungarian matching algorithm)
5. Thresholding matrix based on mutual exclusivity and a match_threshold
The correspondence ids use -1 to indicate non-matching points.
Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew
Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural
Networks. In CVPR, 2020. https://huggingface.co/papers/1911.11763
"""
def __init__(self, config: SuperGlueConfig) -> None:
super().__init__(config)
self.keypoint_detector = AutoModelForKeypointDetection.from_config(config.keypoint_detector_config)
self.keypoint_encoder = SuperGlueKeypointEncoder(config)
self.gnn = SuperGlueAttentionalGNN(config)
self.final_projection = SuperGlueFinalProjection(config)
bin_score = torch.nn.Parameter(torch.tensor(1.0))
self.register_parameter('bin_score', bin_score)
self.post_init()
def _match_image_pair(self, keypoints: torch.Tensor, descriptors: torch.Tensor, scores: torch.Tensor, height: int, width: int, mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> tuple[torch.Tensor, torch.Tensor, tuple, tuple]:
"""
Perform keypoint matching between two images.
Args:
keypoints (`torch.Tensor` of shape `(batch_size, 2, num_keypoints, 2)`):
Keypoints detected in the pair of images.
descriptors (`torch.Tensor` of shape `(batch_size, 2, descriptor_dim, num_keypoints)`):
Descriptors of the keypoints detected in the image pair.
scores (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`):
Confidence scores of the keypoints detected in the image pair.
height (`int`): Image height.
width (`int`): Image width.
mask (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`, *optional*):
Mask indicating which values in the keypoints, matches and matching_scores tensors are keypoint matching
information.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors. Default to `config.output_attentions`.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. Default to `config.output_hidden_states`.
Returns:
matches (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`):
For each image pair: for each keypoint in image0, the index of the keypoint in image1 it was matched
with, and for each keypoint in image1, the index of the keypoint in image0 it was matched with.
matching_scores (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`):
Scores of predicted matches for each image pair.
all_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(1, 2, num_keypoints,
num_channels)`.
all_attentions (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(1, 2, num_heads, num_keypoints,
num_keypoints)`.
"""
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
if keypoints.shape[2] == 0:
shape = keypoints.shape[:-1]
return (keypoints.new_full(shape, -1, dtype=torch.int), keypoints.new_zeros(shape), all_hidden_states, all_attentions)
batch_size, _, num_keypoints, _ = keypoints.shape
keypoints = keypoints.reshape(batch_size * 2, num_keypoints, 2)
descriptors = descriptors.reshape(batch_size * 2, num_keypoints, self.config.hidden_size)
scores = scores.reshape(batch_size * 2, num_keypoints)
mask = mask.reshape(batch_size * 2, num_keypoints) if mask is not None else None
keypoints = normalize_keypoints(keypoints, height, width)
encoded_keypoints = self.keypoint_encoder(keypoints, scores, output_hidden_states=output_hidden_states)
last_hidden_state = encoded_keypoints[0]
descriptors = descriptors + last_hidden_state
if mask is not None:
input_shape = descriptors.size()
extended_attention_mask = self.get_extended_attention_mask(mask, input_shape)
else:
extended_attention_mask = torch.ones((batch_size, num_keypoints), device=keypoints.device)
gnn_outputs = self.gnn(descriptors, mask=extended_attention_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions)
descriptors = gnn_outputs[0]
projected_descriptors = self.final_projection(descriptors)
final_descriptors = projected_descriptors.reshape(batch_size, 2, num_keypoints, self.config.hidden_size)
final_descriptors0 = final_descriptors[:, 0]
final_descriptors1 = final_descriptors[:, 1]
scores = final_descriptors0 @ final_descriptors1.transpose(1, 2)
scores = scores / self.config.hidden_size ** 0.5
if mask is not None:
mask = mask.reshape(batch_size, 2, num_keypoints)
mask0 = mask[:, 0].unsqueeze(2)
mask1 = mask[:, 1].unsqueeze(1)
mask = torch.logical_and(mask0, mask1)
scores = scores.masked_fill(mask == 0, torch.finfo(scores.dtype).min)
scores = log_optimal_transport(scores, self.bin_score, iterations=self.config.sinkhorn_iterations)
max0 = scores[:, :-1, :-1].max(2)
max1 = scores[:, :-1, :-1].max(1)
indices0 = max0.indices
indices1 = max1.indices
mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0)
mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1)
zero = scores.new_tensor(0)
matching_scores0 = torch.where(mutual0, max0.values.exp(), zero)
matching_scores0 = torch.where(matching_scores0 > self.config.matching_threshold, matching_scores0, zero)
matching_scores1 = torch.where(mutual1, matching_scores0.gather(1, indices1), zero)
valid0 = mutual0 & (matching_scores0 > zero)
valid1 = mutual1 & valid0.gather(1, indices1)
matches0 = torch.where(valid0, indices0, indices0.new_tensor(-1))
matches1 = torch.where(valid1, indices1, indices1.new_tensor(-1))
matches = torch.cat([matches0, matches1], dim=1).reshape(batch_size, 2, -1)
matching_scores = torch.cat([matching_scores0, matching_scores1], dim=1).reshape(batch_size, 2, -1)
if output_hidden_states:
all_hidden_states = all_hidden_states + encoded_keypoints[1]
all_hidden_states = all_hidden_states + gnn_outputs[1]
all_hidden_states = all_hidden_states + (projected_descriptors,)
all_hidden_states = tuple((x.reshape(batch_size, 2, num_keypoints, -1).transpose(-1, -2) for x in all_hidden_states))
if output_attentions:
all_attentions = all_attentions + gnn_outputs[2]
all_attentions = tuple((x.reshape(batch_size, 2, -1, num_keypoints, num_keypoints) for x in all_attentions))
return (matches, matching_scores, all_hidden_states, all_attentions)
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, KeypointMatchingOutput]:
"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoModel
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_78916675_4568141288.jpg?raw=true"
>>> image1 = Image.open(requests.get(url, stream=True).raw)
>>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_19481797_2295892421.jpg?raw=true"
>>> image2 = Image.open(requests.get(url, stream=True).raw)
>>> images = [image1, image2]
>>> processor = AutoImageProcessor.from_pretrained("magic-leap-community/superglue_outdoor")
>>> model = AutoModel.from_pretrained("magic-leap-community/superglue_outdoor")
>>> with torch.no_grad():
...     inputs = processor(images, return_tensors="pt")
...     outputs = model(**inputs)
```"""
loss = None
if labels is not None:
raise ValueError('SuperGlue is not trainable, no labels should be provided.')
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values.ndim != 5 or pixel_values.size(1) != 2:
raise ValueError('Input must be a 5D tensor of shape (batch_size, 2, num_channels, height, width)')
batch_size, _, channels, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * 2, channels, height, width)
keypoint_detections = self.keypoint_detector(pixel_values)
keypoints, scores, descriptors, mask = keypoint_detections[:4]
keypoints = keypoints.reshape(batch_size, 2, -1, 2).to(pixel_values)
scores = scores.reshape(batch_size, 2, -1).to(pixel_values)
descriptors = descriptors.reshape(batch_size, 2, -1, self.config.hidden_size).to(pixel_values)
mask = mask.reshape(batch_size, 2, -1)
absolute_keypoints = keypoints.clone()
absolute_keypoints[:, :, :, 0] = absolute_keypoints[:, :, :, 0] * width
absolute_keypoints[:, :, :, 1] = absolute_keypoints[:, :, :, 1] * height
matches, matching_scores, hidden_states, attentions = self._match_image_pair(absolute_keypoints, descriptors, scores, height, width, mask=mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states)
if not return_dict:
return tuple((v for v in [loss, matches, matching_scores, keypoints, mask, hidden_states, attentions] if v is not None))
return KeypointMatchingOutput(loss=loss, matches=matches, matching_scores=matching_scores, keypoints=keypoints, mask=mask, hidden_states=hidden_states, attentions=attentions)
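Note: `log_optimal_transport`, called in `_match_image_pair` above, is not part of this excerpt. As a rough orientation only, here is a minimal log-domain Sinkhorn sketch of the kind of normalization it performs; the actual SuperGlue implementation additionally augments the score matrix with a learned dustbin row and column (`self.bin_score`) so unmatched keypoints have somewhere to go. Function name and the uniform marginals are illustrative assumptions, not the library's API.

```python
import torch

def log_sinkhorn(log_scores: torch.Tensor, iterations: int = 20) -> torch.Tensor:
    # Alternate row/column normalization in log space; exp() of the result is
    # approximately doubly stochastic, i.e. a soft assignment matrix.
    u = torch.zeros(log_scores.shape[0])
    v = torch.zeros(log_scores.shape[1])
    for _ in range(iterations):
        u = -torch.logsumexp(log_scores + v[None, :], dim=1)
        v = -torch.logsumexp(log_scores + u[:, None], dim=0)
    return log_scores + u[:, None] + v[None, :]

log_scores = torch.randn(5, 5)  # square toy problem so uniform marginals are feasible
transport = log_sinkhorn(log_scores)
print(transport.exp().sum(dim=1))  # each row sums to approximately 1
```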
|
@auto_docstring(custom_intro='\n SuperGlue model taking images as inputs and outputting the matching of them.\n ')
class SuperGlueForKeypointMatching(SuperGluePreTrainedModel):
'''SuperGlue feature matching middle-end
Given two sets of keypoints and locations, we determine the
correspondences by:
1. Keypoint Encoding (normalization + visual feature and location fusion)
2. Graph Neural Network with multiple self and cross-attention layers
3. Final projection layer
4. Optimal Transport Layer (a differentiable Hungarian matching algorithm)
5. Thresholding matrix based on mutual exclusivity and a match_threshold
The correspondence ids use -1 to indicate non-matching points.
Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew
Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural
Networks. In CVPR, 2020. https://huggingface.co/papers/1911.11763
'''
def __init__(self, config: SuperGlueConfig) -> None:
pass
def _match_image_pair(self, keypoints: torch.Tensor, descriptors: torch.Tensor, scores: torch.Tensor, height: int, width: int, mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> tuple[torch.Tensor, torch.Tensor, tuple, tuple]:
'''
Perform keypoint matching between two images.
Args:
keypoints (`torch.Tensor` of shape `(batch_size, 2, num_keypoints, 2)`):
Keypoints detected in the pair of images.
descriptors (`torch.Tensor` of shape `(batch_size, 2, descriptor_dim, num_keypoints)`):
Descriptors of the keypoints detected in the image pair.
scores (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`):
Confidence scores of the keypoints detected in the image pair.
height (`int`): Image height.
width (`int`): Image width.
mask (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`, *optional*):
Mask indicating which entries of the keypoints, matches and matching_scores tensors contain actual
keypoint matching information, as opposed to padding.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors. Defaults to `config.output_attentions`.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. Defaults to `config.output_hidden_states`.
Returns:
matches (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`):
For each image pair, the index in image1 of the keypoint matched with each keypoint of image0, and
likewise the index in image0 of the keypoint matched with each keypoint of image1.
matching_scores (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`):
Scores of the predicted matches for each image pair.
all_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(1, 2, num_keypoints,
num_channels)`.
all_attentions (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(1, 2, num_heads, num_keypoints,
num_keypoints)`.
'''
pass
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, KeypointMatchingOutput]:
'''
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoModel
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_78916675_4568141288.jpg?raw=true"
>>> image1 = Image.open(requests.get(url, stream=True).raw)
>>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_19481797_2295892421.jpg?raw=true"
>>> image2 = Image.open(requests.get(url, stream=True).raw)
>>> images = [image1, image2]
>>> processor = AutoImageProcessor.from_pretrained("magic-leap-community/superglue_outdoor")
>>> model = AutoModel.from_pretrained("magic-leap-community/superglue_outdoor")
>>> with torch.no_grad():
...     inputs = processor(images, return_tensors="pt")
...     outputs = model(**inputs)
```'''
pass
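Steps 4-5 of the pipeline described in the docstring above (mutual exclusivity plus a match threshold) are easier to follow outside of the batched tensor code in `_match_image_pair`. A minimal single-pair sketch of the same mutual nearest-neighbor logic, with illustrative names and threshold:

```python
import torch

def mutual_matches(scores: torch.Tensor, threshold: float = 0.2) -> torch.Tensor:
    # scores[i, j]: matching score between keypoint i of image0 and j of image1.
    max0 = scores.max(dim=1)  # best candidate in image1 for each keypoint of image0
    max1 = scores.max(dim=0)  # best candidate in image0 for each keypoint of image1
    # Keep (i, j) only if i -> j and j -> i agree (mutual exclusivity) ...
    mutual = torch.arange(scores.shape[0]) == max1.indices[max0.indices]
    # ... and the score clears the threshold; -1 marks unmatched keypoints.
    valid = mutual & (max0.values > threshold)
    return torch.where(valid, max0.indices, torch.full_like(max0.indices, -1))

scores = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.4, 0.5]])
print(mutual_matches(scores))  # tensor([ 0,  1, -1])
```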
| 6 | 3 | 80 | 12 | 49 | 20 | 6 | 0.48 | 1 | 12 | 6 | 0 | 3 | 5 | 3 | 131 | 262 | 41 | 150 | 63 | 128 | 72 | 91 | 44 | 87 | 9 | 3 | 1 | 17 |
| 5,429 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/modeling_superglue.py | transformers.models.superglue.modeling_superglue.SuperGlueKeypointEncoder |
from torch import nn
from transformers.models.superglue.configuration_superglue import SuperGlueConfig
import torch
from typing import Optional, Union
class SuperGlueKeypointEncoder(nn.Module):
def __init__(self, config: SuperGlueConfig) -> None:
super().__init__()
layer_sizes = config.keypoint_encoder_sizes
hidden_size = config.hidden_size
encoder_channels = [3] + layer_sizes + [hidden_size]
layers = [SuperGlueMultiLayerPerceptron(config, encoder_channels[i - 1], encoder_channels[i]) for i in range(1, len(encoder_channels) - 1)]
layers.append(nn.Linear(encoder_channels[-2], encoder_channels[-1]))
self.encoder = nn.ModuleList(layers)
def forward(self, keypoints: torch.Tensor, scores: torch.Tensor, output_hidden_states: Optional[bool]=False) -> tuple[torch.Tensor, Optional[tuple[torch.Tensor]]]:
scores = scores.unsqueeze(2)
hidden_state = torch.cat([keypoints, scores], dim=2)
all_hidden_states = () if output_hidden_states else None
for layer in self.encoder:
hidden_state = layer(hidden_state)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
return (hidden_state, all_hidden_states)
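A small shape check for the location/score fusion performed in `forward` above: the `3` input channels of the first MLP layer (`[3] + layer_sizes` in `__init__`) come from concatenating the (x, y) location with the detection score. Values below are illustrative:

```python
import torch

keypoints = torch.randn(2, 100, 2)  # (batch, num_keypoints, 2) normalized (x, y)
scores = torch.rand(2, 100)         # (batch, num_keypoints) detection confidences
hidden_state = torch.cat([keypoints, scores.unsqueeze(2)], dim=2)
print(hidden_state.shape)           # torch.Size([2, 100, 3]) -> matches [3] + layer_sizes
```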
|
class SuperGlueKeypointEncoder(nn.Module):
def __init__(self, config: SuperGlueConfig) -> None:
pass
def forward(self, keypoints: torch.Tensor, scores: torch.Tensor, output_hidden_states: Optional[bool]=False) -> tuple[torch.Tensor, Optional[tuple[torch.Tensor]]]:
pass
| 3 | 0 | 14 | 1 | 13 | 1 | 3 | 0.04 | 1 | 6 | 2 | 0 | 2 | 1 | 2 | 12 | 29 | 2 | 26 | 16 | 18 | 1 | 18 | 11 | 15 | 4 | 1 | 2 | 5 |
| 5,430 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/modeling_superglue.py | transformers.models.superglue.modeling_superglue.SuperGlueMultiLayerPerceptron |
from transformers.models.superglue.configuration_superglue import SuperGlueConfig
from torch import nn
import torch
class SuperGlueMultiLayerPerceptron(nn.Module):
def __init__(self, config: SuperGlueConfig, in_channels: int, out_channels: int) -> None:
super().__init__()
self.linear = nn.Linear(in_channels, out_channels)
self.batch_norm = nn.BatchNorm1d(out_channels)
self.activation = nn.ReLU()
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.linear(hidden_state)
hidden_state = hidden_state.transpose(-1, -2)
hidden_state = self.batch_norm(hidden_state)
hidden_state = hidden_state.transpose(-1, -2)
hidden_state = self.activation(hidden_state)
return hidden_state
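The transpose sandwich around `self.batch_norm` in the forward pass above exists because `nn.BatchNorm1d` normalizes over dim 1, so the channel dimension must be moved there and back. A quick standalone sketch with illustrative sizes:

```python
import torch
from torch import nn

x = torch.randn(4, 100, 32)  # (batch, num_keypoints, channels)
bn = nn.BatchNorm1d(32)      # expects channels at dim 1
y = bn(x.transpose(-1, -2)).transpose(-1, -2)
print(y.shape)               # torch.Size([4, 100, 32]), shape preserved
```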
|
class SuperGlueMultiLayerPerceptron(nn.Module):
def __init__(self, config: SuperGlueConfig, in_channels: int, out_channels: int) -> None:
pass
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 4 | 1 | 0 | 2 | 3 | 2 | 12 | 14 | 1 | 13 | 6 | 10 | 0 | 13 | 6 | 10 | 1 | 1 | 0 | 2 |
| 5,431 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/modeling_superglue.py | transformers.models.superglue.modeling_superglue.SuperGluePreTrainedModel |
from torch import nn
from transformers.models.superglue.configuration_superglue import SuperGlueConfig
from ...utils import ModelOutput, auto_docstring, logging
from transformers import PreTrainedModel
@auto_docstring
class SuperGluePreTrainedModel(PreTrainedModel):
config: SuperGlueConfig
base_model_prefix = 'superglue'
main_input_name = 'pixel_values'
def _init_weights(self, module: nn.Module) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.BatchNorm1d):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if hasattr(module, 'bin_score'):
module.bin_score.data.fill_(1.0)
|
@auto_docstring
class SuperGluePreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module) -> None:
'''Initialize the weights'''
pass
| 3 | 1 | 13 | 0 | 10 | 3 | 5 | 0.5 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 128 | 23 | 2 | 14 | 5 | 12 | 7 | 12 | 5 | 10 | 5 | 2 | 2 | 5 |
| 5,432 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/modeling_superglue.py | transformers.models.superglue.modeling_superglue.SuperGlueSelfAttention |
import torch
from torch import nn
from typing import Optional, Union
import math
class SuperGlueSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
is_cross_attention = encoder_hidden_states is not None
current_states = encoder_hidden_states if is_cross_attention else hidden_states
attention_mask = encoder_attention_mask if is_cross_attention else attention_mask
batch_size = hidden_states.shape[0]
key_layer = self.key(current_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(current_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
query_length, key_length = (query_layer.shape[2], key_layer.shape[2])
position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (None,)
return outputs
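In the `relative_key`/`relative_key_query` branch above, pairwise position offsets are shifted into non-negative indices before the `distance_embedding` lookup. A minimal sketch of that indexing (sizes are illustrative, not taken from a real config):

```python
import torch

max_position_embeddings = 4
query_length, key_length = 3, 3
position_ids_l = torch.arange(query_length).view(-1, 1)
position_ids_r = torch.arange(key_length).view(1, -1)
distance = position_ids_l - position_ids_r        # in [-(key_length-1), query_length-1]
indices = distance + max_position_embeddings - 1  # in [0, 2*max_position_embeddings-2]
print(indices)  # valid rows for nn.Embedding(2 * max_position_embeddings - 1, head_size)
```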
|
class SuperGlueSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 3 | 0 | 43 | 7 | 31 | 6 | 6 | 0.19 | 1 | 5 | 0 | 0 | 3 | 11 | 3 | 13 | 132 | 22 | 93 | 44 | 80 | 18 | 72 | 35 | 68 | 13 | 1 | 2 | 17 |
| 5,433 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superglue/modeling_superglue.py | transformers.models.superglue.modeling_superglue.SuperGlueSelfOutput |
from transformers.models.superglue.configuration_superglue import SuperGlueConfig
import torch
from torch import nn
class SuperGlueSelfOutput(nn.Module):
def __init__(self, config: SuperGlueConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor, *args) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
return hidden_states
|
class SuperGlueSelfOutput(nn.Module):
def __init__(self, config: SuperGlueConfig):
pass
def forward(self, hidden_states: torch.Tensor, *args) -> torch.Tensor:
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 2 | 1 | 2 | 12 | 8 | 1 | 7 | 4 | 4 | 0 | 7 | 4 | 4 | 1 | 1 | 0 | 2 |
| 5,434 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superpoint/configuration_superpoint.py | transformers.models.superpoint.configuration_superpoint.SuperPointConfig |
from ...configuration_utils import PretrainedConfig
class SuperPointConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`SuperPointForKeypointDetection`]. It is used to instantiate a
SuperPoint model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SuperPoint
[magic-leap-community/superpoint](https://huggingface.co/magic-leap-community/superpoint) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
encoder_hidden_sizes (`List`, *optional*, defaults to `[64, 64, 128, 128]`):
The number of channels in each convolutional layer in the encoder.
decoder_hidden_size (`int`, *optional*, defaults to 256): The hidden size of the decoder.
keypoint_decoder_dim (`int`, *optional*, defaults to 65): The output dimension of the keypoint decoder.
descriptor_decoder_dim (`int`, *optional*, defaults to 256): The output dimension of the descriptor decoder.
keypoint_threshold (`float`, *optional*, defaults to 0.005):
The threshold to use for extracting keypoints.
max_keypoints (`int`, *optional*, defaults to -1):
The maximum number of keypoints to extract. If `-1`, will extract all keypoints.
nms_radius (`int`, *optional*, defaults to 4):
The radius for non-maximum suppression.
border_removal_distance (`int`, *optional*, defaults to 4):
The distance from the border to remove keypoints.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import SuperPointConfig, SuperPointForKeypointDetection
>>> # Initializing a SuperPoint superpoint style configuration
>>> configuration = SuperPointConfig()
>>> # Initializing a model from the superpoint style configuration
>>> model = SuperPointForKeypointDetection(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'superpoint'
def __init__(self, encoder_hidden_sizes: list[int]=[64, 64, 128, 128], decoder_hidden_size: int=256, keypoint_decoder_dim: int=65, descriptor_decoder_dim: int=256, keypoint_threshold: float=0.005, max_keypoints: int=-1, nms_radius: int=4, border_removal_distance: int=4, initializer_range=0.02, **kwargs):
self.encoder_hidden_sizes = encoder_hidden_sizes
self.decoder_hidden_size = decoder_hidden_size
self.keypoint_decoder_dim = keypoint_decoder_dim
self.descriptor_decoder_dim = descriptor_decoder_dim
self.keypoint_threshold = keypoint_threshold
self.max_keypoints = max_keypoints
self.nms_radius = nms_radius
self.border_removal_distance = border_removal_distance
self.initializer_range = initializer_range
super().__init__(**kwargs)
|
class SuperPointConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`SuperPointForKeypointDetection`]. It is used to instantiate a
SuperPoint model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SuperPoint
[magic-leap-community/superpoint](https://huggingface.co/magic-leap-community/superpoint) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
encoder_hidden_sizes (`List`, *optional*, defaults to `[64, 64, 128, 128]`):
The number of channels in each convolutional layer in the encoder.
decoder_hidden_size (`int`, *optional*, defaults to 256): The hidden size of the decoder.
keypoint_decoder_dim (`int`, *optional*, defaults to 65): The output dimension of the keypoint decoder.
descriptor_decoder_dim (`int`, *optional*, defaults to 256): The output dimension of the descriptor decoder.
keypoint_threshold (`float`, *optional*, defaults to 0.005):
The threshold to use for extracting keypoints.
max_keypoints (`int`, *optional*, defaults to -1):
The maximum number of keypoints to extract. If `-1`, will extract all keypoints.
nms_radius (`int`, *optional*, defaults to 4):
The radius for non-maximum suppression.
border_removal_distance (`int`, *optional*, defaults to 4):
The distance from the border to remove keypoints.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import SuperPointConfig, SuperPointForKeypointDetection
>>> # Initializing a SuperPoint superpoint style configuration
>>> configuration = SuperPointConfig()
>>> # Initializing a model from the superpoint style configuration
>>> model = SuperPointForKeypointDetection(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, encoder_hidden_sizes: list[int]=[64, 64, 128, 128], decoder_hidden_size: int=256, keypoint_decoder_dim: int=65, descriptor_decoder_dim: int=256, keypoint_threshold: float=0.005, max_keypoints: int=-1, nms_radius: int=4, border_removal_distance: int=4, initializer_range=0.02, **kwargs):
pass
| 2 | 1 | 24 | 1 | 23 | 0 | 1 | 1.32 | 1 | 3 | 0 | 0 | 1 | 9 | 1 | 1 | 65 | 7 | 25 | 24 | 11 | 33 | 13 | 12 | 11 | 1 | 1 | 0 | 1 |
| 5,435 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superpoint/image_processing_superpoint.py | transformers.models.superpoint.image_processing_superpoint.SuperPointImageProcessor |
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...utils import TensorType, logging, requires_backends
from typing import TYPE_CHECKING, Optional, Union
from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images
from ...image_transforms import resize, to_channel_dimension_format
class SuperPointImageProcessor(BaseImageProcessor):
"""
Constructs a SuperPoint image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
by `do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 480, "width": 640}`):
Resolution of the output image after `resize` is applied. Only has an effect if `do_resize` is set to
`True`. Can be overridden by `size` in the `preprocess` method.
resample (`Resampling`, *optional*, defaults to `2`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_grayscale (`bool`, *optional*, defaults to `False`):
Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_grayscale: bool=False, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 480, 'width': 640}
size = get_size_dict(size, default_to_square=False)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_grayscale = do_grayscale
def resize(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs):
"""
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be inferred from the input
image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
size = get_size_dict(size, default_to_square=False)
return resize(image, size=(size['height'], size['width']), data_format=data_format, input_data_format=input_data_format, **kwargs)
def preprocess(self, images, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_grayscale: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the output image after `resize` has been applied, as a dictionary of the form
`{"height": int, "width": int}`. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values to the range [0, 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_grayscale (`bool`, *optional*, defaults to `self.do_grayscale`):
Whether to convert the image to grayscale.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_grayscale = do_grayscale if do_grayscale is not None else self.do_grayscale
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images]
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if do_grayscale:
images = [convert_to_grayscale(image, input_data_format=input_data_format) for image in images]
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_keypoint_detection(self, outputs: 'SuperPointKeypointDescriptionOutput', target_sizes: Union[TensorType, list[tuple]]) -> list[dict[str, 'torch.Tensor']]:
"""
Converts the raw output of [`SuperPointForKeypointDetection`] into lists of keypoints, scores and descriptors
with coordinates absolute to the original image sizes.
Args:
outputs ([`SuperPointKeypointDescriptionOutput`]):
Raw outputs of the model containing keypoints in a relative (x, y) format, with scores and descriptors.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. This must be the original
image size (before any processing).
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the keypoints in absolute format according
to target_sizes, scores and descriptors for an image in the batch as predicted by the model.
"""
if len(outputs.mask) != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the mask')
if isinstance(target_sizes, list):
image_sizes = torch.tensor(target_sizes, device=outputs.mask.device)
else:
if target_sizes.shape[1] != 2:
raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')
image_sizes = target_sizes
image_sizes = torch.flip(image_sizes, [1])
masked_keypoints = outputs.keypoints * image_sizes[:, None]
masked_keypoints = masked_keypoints.to(torch.int32)
results = []
for image_mask, keypoints, scores, descriptors in zip(outputs.mask, masked_keypoints, outputs.scores, outputs.descriptors):
indices = torch.nonzero(image_mask).squeeze(1)
keypoints = keypoints[indices]
scores = scores[indices]
descriptors = descriptors[indices]
results.append({'keypoints': keypoints, 'scores': scores, 'descriptors': descriptors})
return results
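A hedged end-to-end sketch of how `preprocess` and `post_process_keypoint_detection` above fit together. The checkpoint name is taken from the SuperPoint example elsewhere in this file, `AutoModelForKeypointDetection` is the auto class used in the SuperGlue constructor above, and the image path is a placeholder:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForKeypointDetection

processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
model = AutoModelForKeypointDetection.from_pretrained("magic-leap-community/superpoint")

image = Image.open("example.jpg")               # placeholder path
inputs = processor(image, return_tensors="pt")  # resize + rescale (+ optional grayscale)
with torch.no_grad():
    outputs = model(**inputs)
# PIL's size is (width, height); target_sizes expects (height, width).
results = processor.post_process_keypoint_detection(outputs, [image.size[::-1]])
print(results[0]["keypoints"].shape)            # (num_detected_keypoints, 2), absolute pixels
```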
|
class SuperPointImageProcessor(BaseImageProcessor):
'''
Constructs a SuperPoint image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
by `do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"height": 480, "width": 640}`):
Resolution of the output image after `resize` is applied. Only has an effect if `do_resize` is set to
`True`. Can be overridden by `size` in the `preprocess` method.
resample (`Resampling`, *optional*, defaults to `2`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_grayscale (`bool`, *optional*, defaults to `False`):
Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_grayscale: bool=False, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs):
'''
Resize an image.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the output image. If not provided, it will be inferred from the input
image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def preprocess(self, images, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_grayscale: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> BatchFeature:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the output image after `resize` has been applied, as a dictionary of the form
`{"height": int, "width": int}`. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values to the range [0, 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_grayscale (`bool`, *optional*, defaults to `self.do_grayscale`):
Whether to convert the image to grayscale.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def post_process_keypoint_detection(self, outputs: 'SuperPointKeypointDescriptionOutput', target_sizes: Union[TensorType, list[tuple]]) -> list[dict[str, 'torch.Tensor']]:
'''
Converts the raw output of [`SuperPointForKeypointDetection`] into lists of keypoints, scores and descriptors
with coordinates absolute to the original image sizes.
Args:
outputs ([`SuperPointKeypointDescriptionOutput`]):
Raw outputs of the model containing keypoints in a relative (x, y) format, with scores and descriptors.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. This must be the original
image size (before any processing).
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the keypoints in absolute format according
to target_sizes, scores and descriptors for an image in the batch as predicted by the model.
'''
pass
| 5 | 4 | 54 | 7 | 28 | 19 | 6 | 0.83 | 1 | 9 | 2 | 0 | 4 | 5 | 4 | 24 | 241 | 32 | 114 | 46 | 80 | 95 | 63 | 17 | 58 | 15 | 3 | 2 | 23 |
| 5,436 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superpoint/modeling_superpoint.py | transformers.models.superpoint.modeling_superpoint.SuperPointConvBlock |
from transformers.models.superpoint.configuration_superpoint import SuperPointConfig
from torch import nn
import torch
class SuperPointConvBlock(nn.Module):
def __init__(self, config: SuperPointConfig, in_channels: int, out_channels: int, add_pooling: bool=False) -> None:
super().__init__()
self.conv_a = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.conv_b = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2) if add_pooling else None
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.relu(self.conv_a(hidden_states))
hidden_states = self.relu(self.conv_b(hidden_states))
if self.pool is not None:
hidden_states = self.pool(hidden_states)
return hidden_states
|
class SuperPointConvBlock(nn.Module):
def __init__(self, config: SuperPointConfig, in_channels: int, out_channels: int, add_pooling: bool=False) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 13 | 0 | 13 | 0 | 2 | 0 | 1 | 5 | 0 | 0 | 2 | 4 | 2 | 12 | 28 | 1 | 27 | 9 | 22 | 0 | 13 | 7 | 10 | 2 | 1 | 1 | 4 |
| 5,437 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superpoint/modeling_superpoint.py | transformers.models.superpoint.modeling_superpoint.SuperPointDescriptorDecoder |
from transformers.models.superpoint.configuration_superpoint import SuperPointConfig
import torch
from torch import nn
class SuperPointDescriptorDecoder(nn.Module):
"""
The SuperPointDescriptorDecoder uses the outputs of both the SuperPointEncoder and the
SuperPointInterestPointDecoder to compute the descriptors at the keypoints locations.
The descriptors are first computed by a convolutional layer, then normalized to have a norm of 1. The descriptors
are then interpolated at the keypoints locations.
"""
def __init__(self, config: SuperPointConfig) -> None:
super().__init__()
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv_descriptor_a = nn.Conv2d(config.encoder_hidden_sizes[-1], config.decoder_hidden_size, kernel_size=3, stride=1, padding=1)
self.conv_descriptor_b = nn.Conv2d(config.decoder_hidden_size, config.descriptor_decoder_dim, kernel_size=1, stride=1, padding=0)
def forward(self, encoded: torch.Tensor, keypoints: torch.Tensor) -> torch.Tensor:
"""Based on the encoder output and the keypoints, compute the descriptors for each keypoint"""
descriptors = self.conv_descriptor_b(self.relu(self.conv_descriptor_a(encoded)))
descriptors = nn.functional.normalize(descriptors, p=2, dim=1)
descriptors = self._sample_descriptors(keypoints[None], descriptors[0][None], 8)[0]
descriptors = torch.transpose(descriptors, 0, 1)
return descriptors
@staticmethod
def _sample_descriptors(keypoints, descriptors, scale: int=8) -> torch.Tensor:
"""Interpolate descriptors at keypoint locations"""
batch_size, num_channels, height, width = descriptors.shape
keypoints = keypoints - scale / 2 + 0.5
divisor = torch.tensor([[width * scale - scale / 2 - 0.5, height * scale - scale / 2 - 0.5]])
divisor = divisor.to(keypoints)
keypoints /= divisor
keypoints = keypoints * 2 - 1
kwargs = {'align_corners': True}
keypoints = keypoints.view(batch_size, 1, -1, 2)
descriptors = nn.functional.grid_sample(descriptors, keypoints, mode='bilinear', **kwargs)
descriptors = descriptors.reshape(batch_size, num_channels, -1)
descriptors = nn.functional.normalize(descriptors, p=2, dim=1)
return descriptors
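The coordinate remapping in `_sample_descriptors` above can be opaque: `grid_sample` wants sampling locations in [-1, 1], while keypoints are given in full-resolution pixels on a feature map downsampled by `scale`. The same arithmetic in isolation, with illustrative sizes:

```python
import torch
from torch import nn

descriptors = torch.randn(1, 256, 30, 40)    # (batch, channels, H/8, W/8) feature map
keypoints = torch.tensor([[[100.0, 50.0]]])  # one (x, y) keypoint in full-res pixels
scale = 8

kpts = keypoints - scale / 2 + 0.5           # align to the center of each coarse cell
divisor = torch.tensor([[40 * scale - scale / 2 - 0.5, 30 * scale - scale / 2 - 0.5]])
kpts = (kpts / divisor) * 2 - 1              # now in [-1, 1] as grid_sample expects

sampled = nn.functional.grid_sample(
    descriptors, kpts.view(1, 1, -1, 2), mode="bilinear", align_corners=True
)
print(sampled.shape)                         # torch.Size([1, 256, 1, 1])
```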
|
class SuperPointDescriptorDecoder(nn.Module):
'''
The SuperPointDescriptorDecoder uses the outputs of both the SuperPointEncoder and the
SuperPointInterestPointDecoder to compute the descriptors at the keypoints locations.
The descriptors are first computed by a convolutional layer, then normalized to have a norm of 1. The descriptors
are then interpolated at the keypoints locations.
'''
def __init__(self, config: SuperPointConfig) -> None:
pass
def forward(self, encoded: torch.Tensor, keypoints: torch.Tensor) -> torch.Tensor:
'''Based on the encoder output and the keypoints, compute the descriptors for each keypoint'''
pass
@staticmethod
def _sample_descriptors(keypoints, descriptors, scale: int=8) -> torch.Tensor:
'''Interpolate descriptors at keypoint locations'''
pass
| 5 | 3 | 15 | 1 | 12 | 2 | 1 | 0.31 | 1 | 4 | 0 | 0 | 2 | 4 | 3 | 13 | 58 | 8 | 39 | 13 | 34 | 12 | 26 | 12 | 22 | 1 | 1 | 0 | 3 |
| 5,438 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superpoint/modeling_superpoint.py | transformers.models.superpoint.modeling_superpoint.SuperPointEncoder |
from transformers.models.superpoint.configuration_superpoint import SuperPointConfig
from torch import nn
from typing import Optional, Union
from transformers.modeling_outputs import BaseModelOutputWithNoAttention
class SuperPointEncoder(nn.Module):
"""
SuperPoint encoder module. It is made of 4 convolutional layers with ReLU activation and max pooling, reducing the
dimensionality of the image.
"""
def __init__(self, config: SuperPointConfig) -> None:
super().__init__()
self.input_dim = 1
conv_blocks = []
conv_blocks.append(SuperPointConvBlock(config, self.input_dim, config.encoder_hidden_sizes[0], add_pooling=True))
for i in range(1, len(config.encoder_hidden_sizes) - 1):
conv_blocks.append(SuperPointConvBlock(config, config.encoder_hidden_sizes[i - 1], config.encoder_hidden_sizes[i], add_pooling=True))
conv_blocks.append(SuperPointConvBlock(config, config.encoder_hidden_sizes[-2], config.encoder_hidden_sizes[-1], add_pooling=False))
self.conv_blocks = nn.ModuleList(conv_blocks)
def forward(self, input, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutputWithNoAttention]:
all_hidden_states = () if output_hidden_states else None
for conv_block in self.conv_blocks:
input = conv_block(input)
if output_hidden_states:
all_hidden_states = all_hidden_states + (input,)
output = input
if not return_dict:
return tuple((v for v in [output, all_hidden_states] if v is not None))
return BaseModelOutputWithNoAttention(last_hidden_state=output, hidden_states=all_hidden_states)
|
class SuperPointEncoder(nn.Module):
'''
SuperPoint encoder module. It is made of 4 convolutional layers with ReLU activation and max pooling, reducing the
dimensionality of the image.
'''
def __init__(self, config: SuperPointConfig) -> None:
pass
def forward(self, input, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutputWithNoAttention]:
pass
| 3 | 1 | 21 | 2 | 19 | 1 | 4 | 0.13 | 1 | 7 | 1 | 0 | 2 | 2 | 2 | 12 | 48 | 5 | 38 | 15 | 30 | 5 | 20 | 10 | 17 | 5 | 1 | 2 | 7 |
| 5,439 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superpoint/modeling_superpoint.py | transformers.models.superpoint.modeling_superpoint.SuperPointForKeypointDetection |
from transformers.models.superpoint.configuration_superpoint import SuperPointConfig
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
@auto_docstring(custom_intro='\n SuperPoint model outputting keypoints and descriptors.\n ')
class SuperPointForKeypointDetection(SuperPointPreTrainedModel):
"""
SuperPoint model. It consists of a SuperPointEncoder, a SuperPointInterestPointDecoder and a
SuperPointDescriptorDecoder. SuperPoint was proposed in `SuperPoint: Self-Supervised Interest Point Detection and
Description <https://huggingface.co/papers/1712.07629>`__ by Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. It
is a fully convolutional neural network that extracts keypoints and descriptors from an image. It is trained in a
self-supervised manner, using a combination of a photometric loss and a loss based on the homographic adaptation of
keypoints. It is made of a convolutional encoder and two decoders: one for keypoints and one for descriptors.
"""
def __init__(self, config: SuperPointConfig) -> None:
super().__init__(config)
self.config = config
self.encoder = SuperPointEncoder(config)
self.keypoint_decoder = SuperPointInterestPointDecoder(config)
self.descriptor_decoder = SuperPointDescriptorDecoder(config)
self.post_init()
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SuperPointKeypointDescriptionOutput]:
"""
Examples:
```python
>>> from transformers import AutoImageProcessor, SuperPointForKeypointDetection
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
>>> model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
```"""
loss = None
if labels is not None:
raise ValueError('SuperPoint does not support training for now.')
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
pixel_values = self.extract_one_channel_pixel_values(pixel_values)
batch_size, _, height, width = pixel_values.shape
encoder_outputs = self.encoder(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = encoder_outputs[0]
list_keypoints_scores = [self.keypoint_decoder(last_hidden_state[None, ...]) for last_hidden_state in last_hidden_state]
list_keypoints = [keypoints_scores[0] for keypoints_scores in list_keypoints_scores]
list_scores = [keypoints_scores[1] for keypoints_scores in list_keypoints_scores]
list_descriptors = [self.descriptor_decoder(last_hidden_state[None, ...], keypoints[None, ...]) for last_hidden_state, keypoints in zip(last_hidden_state, list_keypoints)]
maximum_num_keypoints = max((keypoints.shape[0] for keypoints in list_keypoints))
keypoints = torch.zeros((batch_size, maximum_num_keypoints, 2), device=pixel_values.device)
scores = torch.zeros((batch_size, maximum_num_keypoints), device=pixel_values.device)
descriptors = torch.zeros((batch_size, maximum_num_keypoints, self.config.descriptor_decoder_dim), device=pixel_values.device)
mask = torch.zeros((batch_size, maximum_num_keypoints), device=pixel_values.device, dtype=torch.int)
for i, (_keypoints, _scores, _descriptors) in enumerate(zip(list_keypoints, list_scores, list_descriptors)):
keypoints[i, :_keypoints.shape[0]] = _keypoints
scores[i, :_scores.shape[0]] = _scores
descriptors[i, :_descriptors.shape[0]] = _descriptors
mask[i, :_scores.shape[0]] = 1
keypoints = keypoints / torch.tensor([width, height], device=keypoints.device)
hidden_states = encoder_outputs[1] if output_hidden_states else None
if not return_dict:
return tuple((v for v in [loss, keypoints, scores, descriptors, mask, hidden_states] if v is not None))
return SuperPointKeypointDescriptionOutput(loss=loss, keypoints=keypoints, scores=scores, descriptors=descriptors, mask=mask, hidden_states=hidden_states)
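The zero-padding loop in the forward pass above packs variable-length per-image keypoint lists into fixed-size batch tensors, with `mask` marking which slots hold real detections. The same pattern in isolation, with illustrative sizes:

```python
import torch

list_scores = [torch.rand(5), torch.rand(3)]  # two images, 5 and 3 keypoints
max_kpts = max(s.shape[0] for s in list_scores)
scores = torch.zeros(len(list_scores), max_kpts)
mask = torch.zeros(len(list_scores), max_kpts, dtype=torch.int)
for i, s in enumerate(list_scores):
    scores[i, : s.shape[0]] = s
    mask[i, : s.shape[0]] = 1
print(mask)  # tensor([[1, 1, 1, 1, 1], [1, 1, 1, 0, 0]], dtype=torch.int32)
```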
|
@auto_docstring(custom_intro='\n SuperPoint model outputting keypoints and descriptors.\n ')
class SuperPointForKeypointDetection(SuperPointPreTrainedModel):
'''
SuperPoint model. It consists of a SuperPointEncoder, a SuperPointInterestPointDecoder and a
SuperPointDescriptorDecoder. SuperPoint was proposed in `SuperPoint: Self-Supervised Interest Point Detection and
Description <https://huggingface.co/papers/1712.07629>`__ by Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. It
is a fully convolutional neural network that extracts keypoints and descriptors from an image. It is trained in a
self-supervised manner, using a combination of a photometric loss and a loss based on the homographic adaptation of
keypoints. It is made of a convolutional encoder and two decoders: one for keypoints and one for descriptors.
'''
def __init__(self, config: SuperPointConfig) -> None:
pass
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SuperPointKeypointDescriptionOutput]:
'''
Examples:
```python
>>> from transformers import AutoImageProcessor, SuperPointForKeypointDetection
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
>>> model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
```'''
pass
| 5 | 2 | 50 | 11 | 32 | 8 | 4 | 0.35 | 1 | 12 | 4 | 0 | 2 | 4 | 2 | 131 | 111 | 23 | 65 | 28 | 55 | 23 | 37 | 21 | 34 | 7 | 3 | 1 | 8 |
| 5,440 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superpoint/modeling_superpoint.py | transformers.models.superpoint.modeling_superpoint.SuperPointInterestPointDecoder |
from torch import nn
import torch
from transformers.models.superpoint.configuration_superpoint import SuperPointConfig
class SuperPointInterestPointDecoder(nn.Module):
"""
The SuperPointInterestPointDecoder uses the output of the SuperPointEncoder to compute the keypoint with scores.
The scores are first computed by a convolutional layer, then a softmax is applied to get a probability distribution
over the 65 possible keypoint classes. The keypoints are then extracted from the scores by thresholding and
non-maximum suppression. Post-processing is then applied to remove keypoints too close to the image borders as well
as to keep only the k keypoints with highest score.
"""
def __init__(self, config: SuperPointConfig) -> None:
super().__init__()
self.keypoint_threshold = config.keypoint_threshold
self.max_keypoints = config.max_keypoints
self.nms_radius = config.nms_radius
self.border_removal_distance = config.border_removal_distance
self.relu = nn.ReLU(inplace=True)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv_score_a = nn.Conv2d(config.encoder_hidden_sizes[-1], config.decoder_hidden_size, kernel_size=3, stride=1, padding=1)
self.conv_score_b = nn.Conv2d(config.decoder_hidden_size, config.keypoint_decoder_dim, kernel_size=1, stride=1, padding=0)
def forward(self, encoded: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
scores = self._get_pixel_scores(encoded)
keypoints, scores = self._extract_keypoints(scores)
return (keypoints, scores)
def _get_pixel_scores(self, encoded: torch.Tensor) -> torch.Tensor:
"""Based on the encoder output, compute the scores for each pixel of the image"""
scores = self.relu(self.conv_score_a(encoded))
scores = self.conv_score_b(scores)
scores = nn.functional.softmax(scores, 1)[:, :-1]
batch_size, _, height, width = scores.shape
scores = scores.permute(0, 2, 3, 1).reshape(batch_size, height, width, 8, 8)
scores = scores.permute(0, 1, 3, 2, 4).reshape(batch_size, height * 8, width * 8)
scores = simple_nms(scores, self.nms_radius)
return scores
def _extract_keypoints(self, scores: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
"""
Based on their scores, extract the pixels that represent the keypoints that will be used for descriptors computation.
The keypoints are in the form of relative (x, y) coordinates.
"""
_, height, width = scores.shape
keypoints = torch.nonzero(scores[0] > self.keypoint_threshold)
scores = scores[0][tuple(keypoints.t())]
keypoints, scores = remove_keypoints_from_borders(keypoints, scores, self.border_removal_distance, height * 8, width * 8)
if self.max_keypoints >= 0:
keypoints, scores = top_k_keypoints(keypoints, scores, self.max_keypoints)
keypoints = torch.flip(keypoints, [1]).to(scores.dtype)
return (keypoints, scores)
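`simple_nms`, `remove_keypoints_from_borders` and `top_k_keypoints` are module-level helpers not included in this record; a sketch consistent with the call sites above (signatures inferred, following the original SuperPoint post-processing):
```python
import torch
from torch import nn

def simple_nms(scores: torch.Tensor, nms_radius: int) -> torch.Tensor:
    """Fast non-maximum suppression: keep only local score maxima within the radius."""
    def max_pool(x):
        return nn.functional.max_pool2d(x, kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius)

    zeros = torch.zeros_like(scores)
    max_mask = scores == max_pool(scores)
    for _ in range(2):  # iteratively suppress neighbors of already-kept maxima
        supp_mask = max_pool(max_mask.float()) > 0
        supp_scores = torch.where(supp_mask, zeros, scores)
        new_max_mask = supp_scores == max_pool(supp_scores)
        max_mask = max_mask | (new_max_mask & (~supp_mask))
    return torch.where(max_mask, scores, zeros)

def remove_keypoints_from_borders(keypoints, scores, border, height, width):
    """Drop keypoints closer than `border` pixels to any image edge."""
    mask_h = (keypoints[:, 0] >= border) & (keypoints[:, 0] < (height - border))
    mask_w = (keypoints[:, 1] >= border) & (keypoints[:, 1] < (width - border))
    mask = mask_h & mask_w
    return keypoints[mask], scores[mask]

def top_k_keypoints(keypoints, scores, k):
    """Keep only the k highest-scoring keypoints."""
    if k >= len(keypoints):
        return keypoints, scores
    scores, indices = torch.topk(scores, k, dim=0)
    return keypoints[indices], scores
```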
|
class SuperPointInterestPointDecoder(nn.Module):
'''
The SuperPointInterestPointDecoder uses the output of the SuperPointEncoder to compute the keypoint with scores.
The scores are first computed by a convolutional layer, then a softmax is applied to get a probability distribution
over the 65 possible keypoint classes. The keypoints are then extracted from the scores by thresholding and
non-maximum suppression. Post-processing is then applied to remove keypoints too close to the image borders as well
as to keep only the k keypoints with highest score.
'''
def __init__(self, config: SuperPointConfig) -> None:
pass
def forward(self, encoded: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
pass
def _get_pixel_scores(self, encoded: torch.Tensor) -> torch.Tensor:
'''Based on the encoder output, compute the scores for each pixel of the image'''
pass
def _extract_keypoints(self, scores: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
'''
Based on their scores, extract the pixels that represent the keypoints that will be used for descriptors computation.
The keypoints are in the form of relative (x, y) coordinates.
'''
pass
| 5 | 3 | 15 | 2 | 11 | 2 | 1 | 0.37 | 1 | 4 | 0 | 0 | 4 | 8 | 4 | 14 | 70 | 11 | 43 | 19 | 38 | 16 | 33 | 19 | 28 | 2 | 1 | 1 | 5 |
5,441 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superpoint/modeling_superpoint.py | transformers.models.superpoint.modeling_superpoint.SuperPointKeypointDescriptionOutput
|
from dataclasses import dataclass
from typing import Optional, Union
import torch
from ...utils import ModelOutput, auto_docstring, logging
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of image point description models. Due to the nature of keypoint detection, the number of\n keypoints is not fixed and can vary from image to image, which makes batching non-trivial. In the batch of images,\n the maximum number of keypoints is set as the dimension of the keypoints, scores and descriptors tensors. The mask\n tensor is used to indicate which values in the keypoints, scores and descriptors tensors are keypoint information\n and which are padding.\n ')
class SuperPointKeypointDescriptionOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*):
Loss computed during training.
keypoints (`torch.FloatTensor` of shape `(batch_size, num_keypoints, 2)`):
Relative (x, y) coordinates of predicted keypoints in a given image.
scores (`torch.FloatTensor` of shape `(batch_size, num_keypoints)`):
Scores of predicted keypoints.
descriptors (`torch.FloatTensor` of shape `(batch_size, num_keypoints, descriptor_size)`):
Descriptors of predicted keypoints.
mask (`torch.BoolTensor` of shape `(batch_size, num_keypoints)`):
Mask indicating which values in keypoints, scores and descriptors are keypoint information.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or
when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
(also called feature maps) of the model at the output of each stage.
"""
loss: Optional[torch.FloatTensor] = None
keypoints: Optional[torch.FloatTensor] = None
scores: Optional[torch.FloatTensor] = None
descriptors: Optional[torch.FloatTensor] = None
mask: Optional[torch.BoolTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Base class for outputs of image point description models. Due to the nature of keypoint detection, the number of\n keypoints is not fixed and can vary from image to image, which makes batching non-trivial. In the batch of images,\n the maximum number of keypoints is set as the dimension of the keypoints, scores and descriptors tensors. The mask\n tensor is used to indicate which values in the keypoints, scores and descriptors tensors are keypoint information\n and which are padding.\n ')
class SuperPointKeypointDescriptionOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*):
Loss computed during training.
keypoints (`torch.FloatTensor` of shape `(batch_size, num_keypoints, 2)`):
Relative (x, y) coordinates of predicted keypoints in a given image.
scores (`torch.FloatTensor` of shape `(batch_size, num_keypoints)`):
Scores of predicted keypoints.
descriptors (`torch.FloatTensor` of shape `(batch_size, num_keypoints, descriptor_size)`):
Descriptors of predicted keypoints.
mask (`torch.BoolTensor` of shape `(batch_size, num_keypoints)`):
Mask indicating which values in keypoints, scores and descriptors are keypoint information.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or
when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
(also called feature maps) of the model at the output of each stage.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 2 | 7 | 7 | 6 | 23 | 7 | 7 | 6 | 0 | 1 | 0 | 0 |
5,442 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/superpoint/modeling_superpoint.py | transformers.models.superpoint.modeling_superpoint.SuperPointPreTrainedModel
|
import torch
from torch import nn
from transformers import PreTrainedModel
from typing import Optional, Union
from transformers.models.superpoint.configuration_superpoint import SuperPointConfig
from ...utils import ModelOutput, auto_docstring, logging
@auto_docstring
class SuperPointPreTrainedModel(PreTrainedModel):
config: SuperPointConfig
base_model_prefix = 'superpoint'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = False
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def extract_one_channel_pixel_values(self, pixel_values: torch.FloatTensor) -> torch.FloatTensor:
"""
Assuming pixel_values has shape (batch_size, 3, height, width), and that all channel values are the same,
extract the first channel value to get a tensor of shape (batch_size, 1, height, width) for SuperPoint. This is
a workaround for the issue discussed in:
https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446
Args:
pixel_values: torch.FloatTensor of shape (batch_size, 3, height, width)
Returns:
pixel_values: torch.FloatTensor of shape (batch_size, 1, height, width)
"""
return pixel_values[:, 0, :, :][:, None, :, :]
|
@auto_docstring
class SuperPointPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
'''Initialize the weights'''
pass
def extract_one_channel_pixel_values(self, pixel_values: torch.FloatTensor) -> torch.FloatTensor:
'''
Assuming pixel_values has shape (batch_size, 3, height, width), and that all channel values are the same,
extract the first channel value to get a tensor of shape (batch_size, 1, height, width) for SuperPoint. This is
a workaround for the issue discussed in:
https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446
Args:
pixel_values: torch.FloatTensor of shape (batch_size, 3, height, width)
Returns:
pixel_values: torch.FloatTensor of shape (batch_size, 1, height, width)
'''
pass
| 4 | 2 | 13 | 2 | 5 | 7 | 3 | 1.13 | 1 | 0 | 0 | 1 | 2 | 0 | 2 | 129 | 38 | 6 | 15 | 7 | 12 | 17 | 14 | 7 | 11 | 4 | 2 | 2 | 5 |
5,443 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/configuration_swiftformer.py | transformers.models.swiftformer.configuration_swiftformer.SwiftFormerConfig
|
from ...configuration_utils import PretrainedConfig
class SwiftFormerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`SwiftFormerModel`]. It is used to instantiate an
SwiftFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SwiftFormer
[MBZUAI/swiftformer-xs](https://huggingface.co/MBZUAI/swiftformer-xs) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
depths (`list[int]`, *optional*, defaults to `[3, 3, 6, 4]`):
Depth of each stage.
embed_dims (`list[int]`, *optional*, defaults to `[48, 56, 112, 220]`):
The embedding dimension at each stage.
mlp_ratio (`int`, *optional*, defaults to 4):
Ratio of size of the hidden dimensionality of an MLP to the dimensionality of its input.
downsamples (`list[bool]`, *optional*, defaults to `[True, True, True, True]`):
Whether or not to downsample inputs between two stages.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (string). `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
down_patch_size (`int`, *optional*, defaults to 3):
The size of patches in downsampling layers.
down_stride (`int`, *optional*, defaults to 2):
The stride of convolution kernels in downsampling layers.
down_pad (`int`, *optional*, defaults to 1):
Padding in downsampling layers.
drop_path_rate (`float`, *optional*, defaults to 0.0):
Rate at which to increase dropout probability in DropPath.
drop_mlp_rate (`float`, *optional*, defaults to 0.0):
Dropout rate for the MLP component of SwiftFormer.
drop_conv_encoder_rate (`float`, *optional*, defaults to 0.0):
Dropout rate for the ConvEncoder component of SwiftFormer.
use_layer_scale (`bool`, *optional*, defaults to `True`):
Whether to scale outputs from token mixers.
layer_scale_init_value (`float`, *optional*, defaults to 1e-05):
Factor by which outputs from token mixers are scaled.
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the batch normalization layers.
Example:
```python
>>> from transformers import SwiftFormerConfig, SwiftFormerModel
>>> # Initializing a SwiftFormer swiftformer-base-patch16-224 style configuration
>>> configuration = SwiftFormerConfig()
>>> # Initializing a model (with random weights) from the swiftformer-base-patch16-224 style configuration
>>> model = SwiftFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'swiftformer'
def __init__(self, image_size=224, num_channels=3, depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220], mlp_ratio=4, downsamples=[True, True, True, True], hidden_act='gelu', down_patch_size=3, down_stride=2, down_pad=1, drop_path_rate=0.0, drop_mlp_rate=0.0, drop_conv_encoder_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-05, batch_norm_eps=1e-05, **kwargs):
super().__init__(**kwargs)
self.image_size = image_size
self.num_channels = num_channels
self.depths = depths
self.embed_dims = embed_dims
self.mlp_ratio = mlp_ratio
self.downsamples = downsamples
self.hidden_act = hidden_act
self.down_patch_size = down_patch_size
self.down_stride = down_stride
self.down_pad = down_pad
self.drop_path_rate = drop_path_rate
self.drop_mlp_rate = drop_mlp_rate
self.drop_conv_encoder_rate = drop_conv_encoder_rate
self.use_layer_scale = use_layer_scale
self.layer_scale_init_value = layer_scale_init_value
self.batch_norm_eps = batch_norm_eps
|
class SwiftFormerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`SwiftFormerModel`]. It is used to instantiate an
SwiftFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SwiftFormer
[MBZUAI/swiftformer-xs](https://huggingface.co/MBZUAI/swiftformer-xs) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
depths (`list[int]`, *optional*, defaults to `[3, 3, 6, 4]`):
Depth of each stage.
embed_dims (`list[int]`, *optional*, defaults to `[48, 56, 112, 220]`):
The embedding dimension at each stage.
mlp_ratio (`int`, *optional*, defaults to 4):
Ratio of size of the hidden dimensionality of an MLP to the dimensionality of its input.
downsamples (`list[bool]`, *optional*, defaults to `[True, True, True, True]`):
Whether or not to downsample inputs between two stages.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (string). `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
down_patch_size (`int`, *optional*, defaults to 3):
The size of patches in downsampling layers.
down_stride (`int`, *optional*, defaults to 2):
The stride of convolution kernels in downsampling layers.
down_pad (`int`, *optional*, defaults to 1):
Padding in downsampling layers.
drop_path_rate (`float`, *optional*, defaults to 0.0):
Rate at which to increase dropout probability in DropPath.
drop_mlp_rate (`float`, *optional*, defaults to 0.0):
Dropout rate for the MLP component of SwiftFormer.
drop_conv_encoder_rate (`float`, *optional*, defaults to 0.0):
Dropout rate for the ConvEncoder component of SwiftFormer.
use_layer_scale (`bool`, *optional*, defaults to `True`):
Whether to scale outputs from token mixers.
layer_scale_init_value (`float`, *optional*, defaults to 1e-05):
Factor by which outputs from token mixers are scaled.
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the batch normalization layers.
Example:
```python
>>> from transformers import SwiftFormerConfig, SwiftFormerModel
>>> # Initializing a SwiftFormer swiftformer-base-patch16-224 style configuration
>>> configuration = SwiftFormerConfig()
>>> # Initializing a model (with random weights) from the swiftformer-base-patch16-224 style configuration
>>> model = SwiftFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, image_size=224, num_channels=3, depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220], mlp_ratio=4, downsamples=[True, True, True, True], hidden_act='gelu', down_patch_size=3, down_stride=2, down_pad=1, drop_path_rate=0.0, drop_mlp_rate=0.0, drop_conv_encoder_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-05, batch_norm_eps=1e-05, **kwargs):
pass
| 2 | 1 | 37 | 0 | 37 | 0 | 1 | 1.28 | 1 | 1 | 0 | 0 | 1 | 16 | 1 | 1 | 100 | 11 | 39 | 38 | 18 | 50 | 20 | 19 | 18 | 1 | 1 | 0 | 1 |
5,444 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/configuration_swiftformer.py | transformers.models.swiftformer.configuration_swiftformer.SwiftFormerOnnxConfig
|
from collections import OrderedDict
from ...onnx import OnnxConfig
from collections.abc import Mapping
from packaging import version
class SwiftFormerOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.11')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
@property
def atol_for_validation(self) -> float:
return 0.0001
|
class SwiftFormerOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
| 5 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 4 | 0 | 0 | 2 | 0 | 2 | 2 | 14 | 2 | 12 | 6 | 7 | 0 | 6 | 4 | 3 | 1 | 1 | 0 | 2 |
5,445 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerConvEncoder
|
from .configuration_swiftformer import SwiftFormerConfig
from torch import nn
import torch
class SwiftFormerConvEncoder(nn.Module):
"""
`SwiftFormerConvEncoder` with 3*3 and 1*1 convolutions.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height, width]`
"""
def __init__(self, config: SwiftFormerConfig, dim: int):
super().__init__()
hidden_dim = int(config.mlp_ratio * dim)
self.depth_wise_conv = nn.Conv2d(dim, dim, kernel_size=3, padding=1, groups=dim)
self.norm = nn.BatchNorm2d(dim, eps=config.batch_norm_eps)
self.point_wise_conv1 = nn.Conv2d(dim, hidden_dim, kernel_size=1)
self.act = nn.GELU()
self.point_wise_conv2 = nn.Conv2d(hidden_dim, dim, kernel_size=1)
self.drop_path = nn.Dropout(p=config.drop_conv_encoder_rate)
self.layer_scale = nn.Parameter(torch.ones(dim).unsqueeze(-1).unsqueeze(-1), requires_grad=True)
def forward(self, x):
input = x
x = self.depth_wise_conv(x)
x = self.norm(x)
x = self.point_wise_conv1(x)
x = self.act(x)
x = self.point_wise_conv2(x)
x = input + self.drop_path(self.layer_scale * x)
return x
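A quick smoke test (assumed usage, with `SwiftFormerConvEncoder` as defined above): every convolution uses stride 1, so the block preserves both channel count and spatial resolution:
```python
import torch
from transformers import SwiftFormerConfig

config = SwiftFormerConfig()
conv_encoder = SwiftFormerConvEncoder(config, dim=48)
hidden_state = torch.randn(2, 48, 56, 56)
assert conv_encoder(hidden_state).shape == hidden_state.shape  # [2, 48, 56, 56]
```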
|
class SwiftFormerConvEncoder(nn.Module):
'''
`SwiftFormerConvEncoder` with 3*3 and 1*1 convolutions.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height, width]`
'''
def __init__(self, config: SwiftFormerConfig, dim: int):
pass
def forward(self, x):
pass
| 3 | 1 | 10 | 1 | 10 | 0 | 1 | 0.25 | 1 | 3 | 1 | 0 | 2 | 7 | 2 | 12 | 30 | 5 | 20 | 11 | 17 | 5 | 20 | 11 | 17 | 1 | 1 | 0 | 2 |
5,446 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerDropPath
|
from .configuration_swiftformer import SwiftFormerConfig
from torch import nn
import torch
class SwiftFormerDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, config: SwiftFormerConfig) -> None:
super().__init__()
self.drop_prob = config.drop_path_rate
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
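The module delegates to a module-level `drop_path` function that is not part of this record; the standard stochastic-depth implementation used across transformers vision models looks roughly like this:
```python
import torch

def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Randomly zero entire samples of the batch and rescale the survivors."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # one Bernoulli draw per sample, broadcast over all remaining dimensions
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize to 0/1
    return input.div(keep_prob) * random_tensor
```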
|
class SwiftFormerDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, config: SwiftFormerConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| 4 | 1 | 2 | 0 | 2 | 0 | 1 | 0.13 | 1 | 4 | 1 | 0 | 3 | 1 | 3 | 13 | 12 | 3 | 8 | 5 | 4 | 1 | 8 | 5 | 4 | 1 | 1 | 0 | 3 |
5,447 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerEfficientAdditiveAttention
|
from torch import nn
from .configuration_swiftformer import SwiftFormerConfig
import torch
class SwiftFormerEfficientAdditiveAttention(nn.Module):
"""
Efficient Additive Attention module for SwiftFormer.
Input: tensor of shape `[batch_size, sequence_length, dim]` (callers flatten spatial maps into token sequences)
Output: tensor of shape `[batch_size, sequence_length, dim]`
"""
def __init__(self, config: SwiftFormerConfig, dim: int=512):
super().__init__()
self.to_query = nn.Linear(dim, dim)
self.to_key = nn.Linear(dim, dim)
self.w_g = nn.Parameter(torch.randn(dim, 1))
self.scale_factor = dim ** (-0.5)
self.proj = nn.Linear(dim, dim)
self.final = nn.Linear(dim, dim)
def forward(self, x):
query = self.to_query(x)
key = self.to_key(x)
query = torch.nn.functional.normalize(query, dim=-1)
key = torch.nn.functional.normalize(key, dim=-1)
query_weight = query @ self.w_g
scaled_query_weight = query_weight * self.scale_factor
scaled_query_weight = scaled_query_weight.softmax(dim=-1)
global_queries = torch.sum(scaled_query_weight * query, dim=1)
global_queries = global_queries.unsqueeze(1).repeat(1, key.shape[1], 1)
out = self.proj(global_queries * key) + query
out = self.final(out)
return out
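Despite living in a convolutional network, this module attends over a flattened token sequence: `SwiftFormerEncoderBlock` reshapes `[batch_size, channels, height, width]` maps to `[batch_size, height * width, channels]` before calling it. A minimal usage sketch (shapes illustrative):
```python
import torch
from transformers import SwiftFormerConfig

config = SwiftFormerConfig()
attention = SwiftFormerEfficientAdditiveAttention(config, dim=220)
tokens = torch.randn(2, 49, 220)  # e.g. a 7x7 feature map flattened to 49 tokens
output = attention(tokens)
assert output.shape == tokens.shape
```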
|
class SwiftFormerEfficientAdditiveAttention(nn.Module):
'''
Efficient Additive Attention module for SwiftFormer.
Input: tensor of shape `[batch_size, sequence_length, dim]` (callers flatten spatial maps into token sequences)
Output: tensor of shape `[batch_size, sequence_length, dim]`
'''
def __init__(self, config: SwiftFormerConfig, dim: int=512):
pass
def forward(self, x):
pass
| 3 | 1 | 14 | 4 | 11 | 0 | 1 | 0.23 | 1 | 3 | 1 | 0 | 2 | 6 | 2 | 12 | 38 | 11 | 22 | 15 | 19 | 5 | 22 | 15 | 19 | 1 | 1 | 0 | 2 |
5,448 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerEmbeddings
|
from torch import nn
import collections.abc
from .configuration_swiftformer import SwiftFormerConfig
class SwiftFormerEmbeddings(nn.Module):
"""
Embeddings layer consisting of a single 2D convolutional and batch normalization layer.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height/stride, width/stride]`
"""
def __init__(self, config: SwiftFormerConfig, index: int):
super().__init__()
patch_size = config.down_patch_size
stride = config.down_stride
padding = config.down_pad
embed_dims = config.embed_dims
in_chans = embed_dims[index]
embed_dim = embed_dims[index + 1]
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride, padding=padding)
self.norm = nn.BatchNorm2d(embed_dim, eps=config.batch_norm_eps)
def forward(self, x):
x = self.proj(x)
x = self.norm(x)
return x
|
class SwiftFormerEmbeddings(nn.Module):
'''
Embeddings layer consisting of a single 2D convolutional and batch normalization layer.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height/stride, width/stride]`
'''
def __init__(self, config: SwiftFormerConfig, index: int):
pass
def forward(self, x):
pass
| 3 | 1 | 11 | 2 | 9 | 0 | 3 | 0.28 | 1 | 4 | 1 | 0 | 2 | 2 | 2 | 12 | 31 | 8 | 18 | 11 | 15 | 5 | 18 | 11 | 15 | 4 | 1 | 0 | 5 |
5,449 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerEncoder
|
from typing import Optional, Union
import torch
from torch import nn
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from .configuration_swiftformer import SwiftFormerConfig
class SwiftFormerEncoder(nn.Module):
def __init__(self, config: SwiftFormerConfig) -> None:
super().__init__()
self.config = config
embed_dims = config.embed_dims
downsamples = config.downsamples
layer_depths = config.depths
network = []
for i in range(len(layer_depths)):
stage = SwiftFormerStage(config=config, index=i)
network.append(stage)
if i >= len(layer_depths) - 1:
break
if downsamples[i] or embed_dims[i] != embed_dims[i + 1]:
network.append(SwiftFormerEmbeddings(config, index=i))
self.network = nn.ModuleList(network)
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithNoAttention]:
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
all_hidden_states = (hidden_states,) if output_hidden_states else None
for block in self.network:
hidden_states = block(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states] if v is not None))
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
|
class SwiftFormerEncoder(nn.Module):
def __init__(self, config: SwiftFormerConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithNoAttention]:
pass
| 3 | 0 | 23 | 4 | 19 | 1 | 6 | 0.05 | 1 | 9 | 4 | 0 | 2 | 3 | 2 | 12 | 48 | 8 | 38 | 19 | 30 | 2 | 28 | 14 | 25 | 7 | 1 | 2 | 11 |
5,450 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerEncoderBlock
|
from .configuration_swiftformer import SwiftFormerConfig
from torch import nn
import torch
class SwiftFormerEncoderBlock(nn.Module):
"""
SwiftFormer Encoder Block for SwiftFormer. It consists of (1) Local representation module, (2)
SwiftFormerEfficientAdditiveAttention, and (3) MLP block.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height, width]`
"""
def __init__(self, config: SwiftFormerConfig, dim: int, drop_path: float=0.0) -> None:
super().__init__()
layer_scale_init_value = config.layer_scale_init_value
use_layer_scale = config.use_layer_scale
self.local_representation = SwiftFormerLocalRepresentation(config, dim=dim)
self.attn = SwiftFormerEfficientAdditiveAttention(config, dim=dim)
self.linear = SwiftFormerMlp(config, in_features=dim)
self.drop_path = SwiftFormerDropPath(config) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = use_layer_scale
if use_layer_scale:
self.layer_scale_1 = nn.Parameter(layer_scale_init_value * torch.ones(dim).unsqueeze(-1).unsqueeze(-1), requires_grad=True)
self.layer_scale_2 = nn.Parameter(layer_scale_init_value * torch.ones(dim).unsqueeze(-1).unsqueeze(-1), requires_grad=True)
def forward(self, x):
x = self.local_representation(x)
batch_size, channels, height, width = x.shape
res = self.attn(x.permute(0, 2, 3, 1).reshape(batch_size, height * width, channels))
res = res.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
if self.use_layer_scale:
x = x + self.drop_path(self.layer_scale_1 * res)
x = x + self.drop_path(self.layer_scale_2 * self.linear(x))
else:
x = x + self.drop_path(res)
x = x + self.drop_path(self.linear(x))
return x
|
class SwiftFormerEncoderBlock(nn.Module):
'''
SwiftFormer Encoder Block for SwiftFormer. It consists of (1) Local representation module, (2)
SwiftFormerEfficientAdditiveAttention, and (3) MLP block.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height, width]`
'''
def __init__(self, config: SwiftFormerConfig, dim: int, drop_path: float=0.0) -> None:
pass
def forward(self, x):
pass
| 3 | 1 | 15 | 1 | 14 | 0 | 3 | 0.21 | 1 | 8 | 5 | 0 | 2 | 7 | 2 | 12 | 41 | 6 | 29 | 14 | 26 | 6 | 24 | 14 | 21 | 3 | 1 | 1 | 5 |
5,451 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerForImageClassification
|
from ...utils import auto_docstring, logging
import torch
from torch import nn
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from typing import Optional, Union
from .configuration_swiftformer import SwiftFormerConfig
@auto_docstring
class SwiftFormerForImageClassification(SwiftFormerPreTrainedModel):
def __init__(self, config: SwiftFormerConfig) -> None:
super().__init__(config)
embed_dims = config.embed_dims
self.num_labels = config.num_labels
self.swiftformer = SwiftFormerModel(config)
self.norm = nn.BatchNorm2d(embed_dims[-1], eps=config.batch_norm_eps)
self.head = nn.Linear(embed_dims[-1], self.num_labels) if self.num_labels > 0 else nn.Identity()
self.dist_head = nn.Linear(embed_dims[-1], self.num_labels) if self.num_labels > 0 else nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.swiftformer(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs.last_hidden_state if return_dict else outputs[0]
sequence_output = self.norm(sequence_output)
sequence_output = sequence_output.flatten(2).mean(-1)
cls_out = self.head(sequence_output)
distillation_out = self.dist_head(sequence_output)
logits = (cls_out + distillation_out) / 2
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
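A hedged end-to-end inference sketch using the public MBZUAI/swiftformer-xs checkpoint (network access assumed; any SwiftFormer checkpoint works the same way):
```python
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, SwiftFormerForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # average of classification and distillation heads
print(model.config.id2label[logits.argmax(-1).item()])
```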
|
@auto_docstring
class SwiftFormerForImageClassification(SwiftFormerPreTrainedModel):
def __init__(self, config: SwiftFormerConfig) -> None:
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5 | 1 | 40 | 6 | 29 | 6 | 8 | 0.17 | 1 | 8 | 3 | 0 | 2 | 5 | 2 | 3 | 88 | 12 | 65 | 24 | 49 | 11 | 39 | 17 | 36 | 13 | 2 | 3 | 16 |
5,452 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerLocalRepresentation
|
from torch import nn
import torch
from .configuration_swiftformer import SwiftFormerConfig
class SwiftFormerLocalRepresentation(nn.Module):
"""
Local Representation module for SwiftFormer that is implemented by 3*3 depth-wise and point-wise convolutions.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height, width]`
"""
def __init__(self, config: SwiftFormerConfig, dim: int):
super().__init__()
self.depth_wise_conv = nn.Conv2d(dim, dim, kernel_size=3, padding=1, groups=dim)
self.norm = nn.BatchNorm2d(dim, eps=config.batch_norm_eps)
self.point_wise_conv1 = nn.Conv2d(dim, dim, kernel_size=1)
self.act = nn.GELU()
self.point_wise_conv2 = nn.Conv2d(dim, dim, kernel_size=1)
self.drop_path = nn.Identity()
self.layer_scale = nn.Parameter(torch.ones(dim).unsqueeze(-1).unsqueeze(-1), requires_grad=True)
def forward(self, x):
input = x
x = self.depth_wise_conv(x)
x = self.norm(x)
x = self.point_wise_conv1(x)
x = self.act(x)
x = self.point_wise_conv2(x)
x = input + self.drop_path(self.layer_scale * x)
return x
|
class SwiftFormerLocalRepresentation(nn.Module):
'''
Local Representation module for SwiftFormer that is implemented by 3*3 depth-wise and point-wise convolutions.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height, width]`
'''
def __init__(self, config: SwiftFormerConfig, dim: int):
pass
def forward(self, x):
pass
| 3 | 1 | 10 | 1 | 9 | 0 | 1 | 0.26 | 1 | 3 | 1 | 0 | 2 | 7 | 2 | 12 | 29 | 5 | 19 | 10 | 16 | 5 | 19 | 10 | 16 | 1 | 1 | 0 | 2 |
5,453 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerMlp
|
from torch import nn
from .configuration_swiftformer import SwiftFormerConfig
from ...activations import ACT2CLS
class SwiftFormerMlp(nn.Module):
"""
MLP layer with 1*1 convolutions.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height, width]`
"""
def __init__(self, config: SwiftFormerConfig, in_features: int):
super().__init__()
hidden_features = int(in_features * config.mlp_ratio)
self.norm1 = nn.BatchNorm2d(in_features, eps=config.batch_norm_eps)
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
act_layer = ACT2CLS[config.hidden_act]
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, in_features, 1)
self.drop = nn.Dropout(p=config.drop_mlp_rate)
def forward(self, x):
x = self.norm1(x)
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
|
class SwiftFormerMlp(nn.Module):
'''
MLP layer with 1*1 convolutions.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height, width]`
'''
def __init__(self, config: SwiftFormerConfig, in_features: int):
pass
def forward(self, x):
pass
| 3 | 1 | 9 | 0 | 9 | 0 | 1 | 0.28 | 1 | 3 | 1 | 0 | 2 | 5 | 2 | 12 | 27 | 4 | 18 | 10 | 15 | 5 | 18 | 10 | 15 | 1 | 1 | 0 | 2 |
5,454 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerModel
|
from .configuration_swiftformer import SwiftFormerConfig
from typing import Optional, Union
from ...utils import auto_docstring, logging
import torch
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
@auto_docstring
class SwiftFormerModel(SwiftFormerPreTrainedModel):
def __init__(self, config: SwiftFormerConfig):
super().__init__(config)
self.config = config
self.patch_embed = SwiftFormerPatchEmbedding(config)
self.encoder = SwiftFormerEncoder(config)
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithNoAttention]:
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
embedding_output = self.patch_embed(pixel_values)
encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
if not return_dict:
return tuple((v for v in encoder_outputs if v is not None))
return BaseModelOutputWithNoAttention(last_hidden_state=encoder_outputs.last_hidden_state, hidden_states=encoder_outputs.hidden_states)
|
@auto_docstring
class SwiftFormerModel(SwiftFormerPreTrainedModel):
def __init__(self, config: SwiftFormerConfig):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithNoAttention]:
pass
| 5 | 0 | 20 | 4 | 15 | 1 | 3 | 0.05 | 1 | 9 | 4 | 0 | 2 | 3 | 2 | 3 | 49 | 8 | 39 | 14 | 23 | 2 | 17 | 8 | 14 | 5 | 2 | 1 | 6 |
5,455 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerPatchEmbedding
|
from .configuration_swiftformer import SwiftFormerConfig
from torch import nn
class SwiftFormerPatchEmbedding(nn.Module):
"""
Patch Embedding Layer constructed of two 2D convolutional layers.
Input: tensor of shape `[batch_size, in_channels, height, width]`
Output: tensor of shape `[batch_size, out_channels, height/4, width/4]`
"""
def __init__(self, config: SwiftFormerConfig):
super().__init__()
in_chs = config.num_channels
out_chs = config.embed_dims[0]
self.patch_embedding = nn.Sequential(nn.Conv2d(in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(out_chs // 2, eps=config.batch_norm_eps), nn.ReLU(), nn.Conv2d(out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(out_chs, eps=config.batch_norm_eps), nn.ReLU())
def forward(self, x):
return self.patch_embedding(x)
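A quick shape check (assumed usage): the two stride-2 convolutions downsample by a factor of 4 overall, so 224x224 inputs become 56x56 feature maps with `embed_dims[0]` channels:
```python
import torch
from transformers import SwiftFormerConfig

config = SwiftFormerConfig()  # num_channels=3, embed_dims[0]=48 by default
patch_embed = SwiftFormerPatchEmbedding(config)
pixel_values = torch.randn(1, 3, 224, 224)
print(patch_embed(pixel_values).shape)  # torch.Size([1, 48, 56, 56])
```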
|
class SwiftFormerPatchEmbedding(nn.Module):
'''
Patch Embedding Layer constructed of two 2D convolutional layers.
Input: tensor of shape `[batch_size, in_channels, height, width]`
Output: tensor of shape `[batch_size, out_channels, height/4, width/4]`
'''
def __init__(self, config: SwiftFormerConfig):
pass
def forward(self, x):
pass
| 3 | 1 | 8 | 1 | 7 | 0 | 1 | 0.33 | 1 | 2 | 1 | 0 | 2 | 1 | 2 | 12 | 25 | 5 | 15 | 6 | 12 | 5 | 8 | 6 | 5 | 1 | 1 | 0 | 2 |
5,456 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerPreTrainedModel
|
from ...utils import auto_docstring, logging
from torch import nn
from ...modeling_utils import PreTrainedModel
from .configuration_swiftformer import SwiftFormerConfig
@auto_docstring
class SwiftFormerPreTrainedModel(PreTrainedModel):
config: SwiftFormerConfig
base_model_prefix = 'swiftformer'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = ['SwiftFormerEncoderBlock']
def _init_weights(self, module: nn.Module) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Conv2d, nn.Linear)):
nn.init.trunc_normal_(module.weight, std=0.02)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)):
nn.init.constant_(module.bias, 0)
nn.init.constant_(module.weight, 1.0)
elif isinstance(module, (SwiftFormerConvEncoder, SwiftFormerLocalRepresentation)):
module.layer_scale.data.fill_(1.0)
elif isinstance(module, SwiftFormerEncoderBlock):
if self.config.use_layer_scale:
module.layer_scale_1.data.fill_(self.config.layer_scale_init_value)
module.layer_scale_2.data.fill_(self.config.layer_scale_init_value)
elif isinstance(module, SwiftFormerEfficientAdditiveAttention):
nn.init.normal_(module.w_g)
|
@auto_docstring
class SwiftFormerPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module) -> None:
'''Initialize the weights'''
pass
| 3 | 1 | 9 | 0 | 8 | 1 | 4 | 0.36 | 1 | 0 | 0 | 2 | 1 | 0 | 1 | 1 | 21 | 2 | 14 | 7 | 12 | 5 | 13 | 7 | 11 | 4 | 1 | 2 | 4 |
5,457 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swiftformer/modeling_swiftformer.py | transformers.models.swiftformer.modeling_swiftformer.SwiftFormerStage
|
from .configuration_swiftformer import SwiftFormerConfig
from torch import nn
class SwiftFormerStage(nn.Module):
"""
A SwiftFormer stage consisting of a series of `SwiftFormerConvEncoder` blocks and a final
`SwiftFormerEncoderBlock`.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height, width]`
"""
def __init__(self, config: SwiftFormerConfig, index: int) -> None:
super().__init__()
layer_depths = config.depths
dim = config.embed_dims[index]
depth = layer_depths[index]
blocks = []
for block_idx in range(depth):
block_dpr = config.drop_path_rate * (block_idx + sum(layer_depths[:index])) / (sum(layer_depths) - 1)
if depth - block_idx <= 1:
blocks.append(SwiftFormerEncoderBlock(config, dim=dim, drop_path=block_dpr))
else:
blocks.append(SwiftFormerConvEncoder(config, dim=dim))
self.blocks = nn.ModuleList(blocks)
def forward(self, input):
for block in self.blocks:
input = block(input)
return input
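A worked example of the stochastic-depth schedule computed in `__init__`: block k, counted globally across stages, receives `drop_path_rate * k / (total_blocks - 1)`, rising linearly from 0 to the full rate (0.1 here is illustrative; the config default is 0.0):
```python
depths = [3, 3, 6, 4]        # SwiftFormerConfig default depths
drop_path_rate = 0.1         # illustrative; SwiftFormerConfig defaults to 0.0
total_blocks = sum(depths)   # 16

for index in range(len(depths)):
    for block_idx in range(depths[index]):
        block_dpr = drop_path_rate * (block_idx + sum(depths[:index])) / (total_blocks - 1)
        print(f"stage {index}, block {block_idx}: drop_path={block_dpr:.4f}")
# the first block gets 0.0000; the last block of stage 3 gets the full 0.1000
```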
|
class SwiftFormerStage(nn.Module):
'''
A SwiftFormer stage consisting of a series of `SwiftFormerConvEncoder` blocks and a final
`SwiftFormerEncoderBlock`.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height, width]`
'''
def __init__(self, config: SwiftFormerConfig, index: int) -> None:
pass
def forward(self, input):
pass
| 3 | 1 | 11 | 2 | 9 | 0 | 3 | 0.33 | 1 | 6 | 3 | 0 | 2 | 1 | 2 | 12 | 32 | 8 | 18 | 11 | 15 | 6 | 17 | 11 | 14 | 3 | 1 | 2 | 5 |
5,458 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/configuration_swin.py | transformers.models.swin.configuration_swin.SwinConfig
|
from ...configuration_utils import PretrainedConfig
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`SwinModel`]. It is used to instantiate a Swin
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Swin
[microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 7):
Size of windows.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
encoder_stride (`int`, *optional*, defaults to 32):
Factor to increase the spatial resolution by in the decoder head for masked image modeling.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import SwinConfig, SwinModel
>>> # Initializing a Swin microsoft/swin-tiny-patch4-window7-224 style configuration
>>> configuration = SwinConfig()
>>> # Initializing a model (with random weights) from the microsoft/swin-tiny-patch4-window7-224 style configuration
>>> model = SwinModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'swin'
attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-05, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.encoder_stride = encoder_stride
self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
|
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`SwinModel`]. It is used to instantiate a Swin
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Swin
[microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 7):
Size of windows.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
encoder_stride (`int`, *optional*, defaults to 32):
Factor to increase the spatial resolution by in the decoder head for masked image modeling.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import SwinConfig, SwinModel
>>> # Initializing a Swin microsoft/swin-tiny-patch4-window7-224 style configuration
>>> configuration = SwinConfig()
>>> # Initializing a model (with random weights) from the microsoft/swin-tiny-patch4-window7-224 style configuration
>>> model = SwinModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-05, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
pass
| 2 | 1 | 50 | 1 | 47 | 2 | 1 | 1.25 | 2 | 3 | 0 | 0 | 1 | 22 | 1 | 6 | 130 | 11 | 53 | 47 | 29 | 66 | 26 | 25 | 24 | 1 | 1 | 0 | 1 |
5,459 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/configuration_swin.py | transformers.models.swin.configuration_swin.SwinOnnxConfig
|
from collections import OrderedDict
from collections.abc import Mapping
from ...onnx import OnnxConfig
from packaging import version
class SwinOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.11')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
@property
def atol_for_validation(self) -> float:
return 0.0001
|
class SwinOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
| 5 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 4 | 0 | 0 | 2 | 0 | 2 | 2 | 14 | 2 | 12 | 6 | 7 | 0 | 6 | 4 | 3 | 1 | 1 | 0 | 2 |
5,460 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinAttention
|
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
from typing import Optional, Union
from torch import nn
import torch
class SwinAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
self.self = SwinSelfAttention(config, dim, num_heads, window_size)
self.output = SwinSelfOutput(config, dim)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
|
class SwinAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 4 | 0 | 11 | 1 | 10 | 1 | 1 | 0.1 | 1 | 6 | 2 | 0 | 3 | 3 | 3 | 13 | 36 | 4 | 30 | 17 | 20 | 3 | 22 | 11 | 18 | 2 | 1 | 1 | 4 |
5,461 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinBackbone |
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging, torch_int
import torch
from .configuration_swin import SwinConfig
from ...modeling_outputs import BackboneOutput
from typing import Optional, Union
from ...utils.backbone_utils import BackboneMixin
@auto_docstring(custom_intro='\n Swin backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class SwinBackbone(SwinPreTrainedModel, BackboneMixin):
def __init__(self, config: SwinConfig):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.embed_dim] + [int(config.embed_dim * 2 ** i) for i in range(len(config.depths))]
self.embeddings = SwinEmbeddings(config)
self.encoder = SwinEncoder(config, self.embeddings.patch_grid)
hidden_states_norms = {}
for stage, num_channels in zip(self._out_features, self.channels):
hidden_states_norms[stage] = nn.LayerNorm(num_channels)
self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
"""
Returns:
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
>>> model = AutoBackbone.from_pretrained(
... "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 7, 7]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
embedding_output, input_dimensions = self.embeddings(pixel_values)
outputs = self.encoder(embedding_output, input_dimensions, head_mask=None, output_attentions=output_attentions, output_hidden_states=True, output_hidden_states_before_downsampling=True, always_partition=True, return_dict=True)
hidden_states = outputs.reshaped_hidden_states
feature_maps = ()
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
batch_size, num_channels, height, width = hidden_state.shape
hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous()
hidden_state = hidden_state.view(batch_size, height * width, num_channels)
hidden_state = self.hidden_states_norms[stage](hidden_state)
hidden_state = hidden_state.view(batch_size, height, width, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_maps += (hidden_state,)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Swin backbone, to be used with frameworks like DETR and MaskFormer.\n ')
class SwinBackbone(SwinPreTrainedModel, BackboneMixin):
def __init__(self, config: SwinConfig):
pass
def get_input_embeddings(self):
pass
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
'''
Returns:
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
>>> model = AutoBackbone.from_pretrained(
... "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
... )
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 768, 7, 7]
```'''
pass
| 5 | 1 | 31 | 5 | 19 | 7 | 4 | 0.38 | 2 | 10 | 4 | 0 | 3 | 4 | 3 | 16 | 96 | 16 | 58 | 23 | 48 | 22 | 37 | 17 | 33 | 9 | 2 | 2 | 12 |
5,462 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinDropPath |
from typing import Optional, Union
from torch import nn
import torch
class SwinDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
|
class SwinDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| 4 | 1 | 2 | 0 | 2 | 0 | 1 | 0.13 | 1 | 4 | 0 | 0 | 3 | 1 | 3 | 13 | 12 | 3 | 8 | 5 | 4 | 1 | 8 | 5 | 4 | 1 | 1 | 0 | 3 |
5,463 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinEmbeddings |
import torch
from typing import Optional, Union
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging, torch_int
class SwinEmbeddings(nn.Module):
"""
Construct the patch and position embeddings. Optionally, also the mask token.
"""
def __init__(self, config, use_mask_token=False):
super().__init__()
self.patch_embeddings = SwinPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.patch_grid = self.patch_embeddings.grid_size
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
if config.use_absolute_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
else:
self.position_embeddings = None
self.norm = nn.LayerNorm(config.embed_dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method interpolates the pre-trained position encodings, making it possible to use the model on
higher-resolution images. It is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.Tensor]:
_, num_channels, height, width = pixel_values.shape
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
embeddings = self.norm(embeddings)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
if self.position_embeddings is not None:
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return (embeddings, output_dimensions)
|
class SwinEmbeddings(nn.Module):
'''
Construct the patch and position embeddings. Optionally, also the mask token.
'''
def __init__(self, config, use_mask_token=False):
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method interpolates the pre-trained position encodings, making it possible to use the model on
higher-resolution images. It is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.Tensor]:
pass
| 4 | 2 | 27 | 6 | 19 | 3 | 3 | 0.23 | 1 | 5 | 1 | 0 | 3 | 8 | 3 | 13 | 90 | 20 | 57 | 31 | 48 | 13 | 45 | 26 | 41 | 4 | 1 | 2 | 9 |
5,464 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinEncoder |
from typing import Optional, Union
import torch
from torch import nn
class SwinEncoder(nn.Module):
def __init__(self, config, grid_size):
super().__init__()
self.num_layers = len(config.depths)
self.config = config
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu')]
self.layers = nn.ModuleList([SwinStage(config=config, dim=int(config.embed_dim * 2 ** i_layer), input_resolution=(grid_size[0] // 2 ** i_layer, grid_size[1] // 2 ** i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path=dpr[sum(config.depths[:i_layer]):sum(config.depths[:i_layer + 1])], downsample=SwinPatchMerging if i_layer < self.num_layers - 1 else None) for i_layer in range(self.num_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, always_partition: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, SwinEncoderOutput]:
all_hidden_states = () if output_hidden_states else None
all_reshaped_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
batch_size, _, hidden_size = hidden_states.shape
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
for i, layer_module in enumerate(self.layers):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = layer_outputs[1]
output_dimensions = layer_outputs[2]
input_dimensions = (output_dimensions[-2], output_dimensions[-1])
if output_hidden_states and output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states_before_downsampling.shape
reshaped_hidden_state = hidden_states_before_downsampling.view(batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states_before_downsampling,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
elif output_hidden_states and (not output_hidden_states_before_downsampling):
batch_size, _, hidden_size = hidden_states.shape
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
if output_attentions:
all_self_attentions += layer_outputs[3:]
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return SwinEncoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states)
|
class SwinEncoder(nn.Module):
def __init__(self, config, grid_size):
pass
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, always_partition: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, SwinEncoderOutput]:
pass
| 3 | 0 | 49 | 5 | 42 | 2 | 7 | 0.05 | 1 | 10 | 3 | 0 | 2 | 4 | 2 | 12 | 99 | 11 | 84 | 28 | 71 | 4 | 43 | 18 | 40 | 12 | 1 | 2 | 14 |
5,465 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinEncoderOutput |
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from typing import Optional, Union
import torch
@dataclass
@auto_docstring(custom_intro="\n Swin encoder's outputs, with potential hidden states and attentions.\n ")
class SwinEncoderOutput(ModelOutput):
"""
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Swin encoder's outputs, with potential hidden states and attentions.\n ")
class SwinEncoderOutput(ModelOutput):
'''
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 5 | 5 | 5 | 4 | 20 | 5 | 5 | 4 | 0 | 1 | 0 | 0 |
5,466 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinForImageClassification |
from typing import Optional, Union
import torch
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging, torch_int
@auto_docstring(custom_intro="\n Swin Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n the [CLS] token) e.g. for ImageNet.\n\n <Tip>\n\n Note that it's possible to fine-tune Swin on higher resolution images than the ones it has been trained on, by\n setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained\n position embeddings to the higher resolution.\n\n </Tip>\n ")
class SwinForImageClassification(SwinPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.swin = SwinModel(config)
self.classifier = nn.Linear(self.swin.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, SwinImageClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.swin(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
pooled_output = outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SwinImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states)
|
@auto_docstring(custom_intro="\n Swin Model transformer with an image classification head on top (a linear layer on top of the final hidden state of\n the [CLS] token) e.g. for ImageNet.\n\n <Tip>\n\n Note that it's possible to fine-tune Swin on higher resolution images than the ones it has been trained on, by\n setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained\n position embeddings to the higher resolution.\n\n </Tip>\n ")
class SwinForImageClassification(SwinPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, SwinImageClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5 | 1 | 39 | 5 | 30 | 4 | 7 | 0.12 | 1 | 5 | 2 | 0 | 2 | 3 | 2 | 3 | 87 | 11 | 68 | 22 | 49 | 8 | 32 | 12 | 29 | 12 | 2 | 3 | 14 |
5,467 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinForMaskedImageModeling |
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from torch import nn
import torch
import math
from typing import Optional, Union
@auto_docstring(custom_intro='\n Swin Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://huggingface.co/papers/2111.09886).\n\n <Tip>\n\n Note that we provide a script to pre-train this model on custom data in our [examples\n directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).\n\n </Tip>\n ')
class SwinForMaskedImageModeling(SwinPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.swin = SwinModel(config, add_pooling_layer=False, use_mask_token=True)
num_features = int(config.embed_dim * 2 ** (config.num_layers - 1))
self.decoder = nn.Sequential(nn.Conv2d(in_channels=num_features, out_channels=config.encoder_stride ** 2 * config.num_channels, kernel_size=1), nn.PixelShuffle(config.encoder_stride))
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, SwinMaskedImageModelingOutput]:
"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
Examples:
```python
>>> from transformers import AutoImageProcessor, SwinForMaskedImageModeling
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swin-base-simmim-window6-192")
>>> model = SwinForMaskedImageModeling.from_pretrained("microsoft/swin-base-simmim-window6-192")
>>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
>>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
>>> # create random boolean mask of shape (batch_size, num_patches)
>>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
>>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
>>> list(reconstructed_pixel_values.shape)
[1, 3, 192, 192]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.swin(pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = sequence_output.transpose(1, 2)
batch_size, num_channels, sequence_length = sequence_output.shape
height = width = math.floor(sequence_length ** 0.5)
sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)
reconstructed_pixel_values = self.decoder(sequence_output)
masked_im_loss = None
if bool_masked_pos is not None:
size = self.config.image_size // self.config.patch_size
bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
mask = bool_masked_pos.repeat_interleave(self.config.patch_size, 1).repeat_interleave(self.config.patch_size, 2).unsqueeze(1).contiguous()
reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction='none')
masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-05) / self.config.num_channels
if not return_dict:
output = (reconstructed_pixel_values,) + outputs[2:]
return (masked_im_loss,) + output if masked_im_loss is not None else output
return SwinMaskedImageModelingOutput(loss=masked_im_loss, reconstruction=reconstructed_pixel_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states)
|
@auto_docstring(custom_intro='\n Swin Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://huggingface.co/papers/2111.09886).\n\n <Tip>\n\n Note that we provide a script to pre-train this model on custom data in our [examples\n directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).\n\n </Tip>\n ')
class SwinForMaskedImageModeling(SwinPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, SwinMaskedImageModelingOutput]:
'''
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
Examples:
```python
>>> from transformers import AutoImageProcessor, SwinForMaskedImageModeling
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swin-base-simmim-window6-192")
>>> model = SwinForMaskedImageModeling.from_pretrained("microsoft/swin-base-simmim-window6-192")
>>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
>>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
>>> # create random boolean mask of shape (batch_size, num_patches)
>>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
>>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
>>> list(reconstructed_pixel_values.shape)
[1, 3, 192, 192]
```'''
pass
| 5 | 1 | 50 | 8 | 30 | 13 | 3 | 0.42 | 1 | 5 | 2 | 0 | 2 | 2 | 2 | 3 | 104 | 16 | 62 | 26 | 48 | 26 | 27 | 16 | 24 | 5 | 2 | 1 | 6 |
5,468 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinImageClassifierOutput |
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging, torch_int
import torch
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro='\n Swin outputs for image classification.\n ')
class SwinImageClassifierOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Swin outputs for image classification.\n ')
class SwinImageClassifierOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.67 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 5 | 6 | 6 | 5 | 22 | 6 | 6 | 5 | 0 | 1 | 0 | 0 |
5,469 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinIntermediate |
import torch
from ...activations import ACT2FN
from torch import nn
class SwinIntermediate(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class SwinIntermediate(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 4 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3 |
5,470 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinLayer |
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging, torch_int
import torch
from torch import nn
class SwinLayer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.shift_size = shift_size
self.window_size = config.window_size
self.input_resolution = input_resolution
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.attention = SwinAttention(config, dim, num_heads, window_size=self.window_size)
self.drop_path = SwinDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.intermediate = SwinIntermediate(config, dim)
self.output = SwinOutput(config, dim)
def set_shift_and_window_size(self, input_resolution):
if min(input_resolution) <= self.window_size:
self.shift_size = torch_int(0)
self.window_size = torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution)
def get_attn_mask(self, height, width, dtype, device):
if self.shift_size > 0:
img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device)
height_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None))
width_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None))
count = 0
for height_slice in height_slices:
for width_slice in width_slices:
img_mask[:, height_slice, width_slice, :] = count
count += 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)
else:
attn_mask = None
return attn_mask
def maybe_pad(self, hidden_states, height, width):
pad_right = (self.window_size - width % self.window_size) % self.window_size
pad_bottom = (self.window_size - height % self.window_size) % self.window_size
pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return (hidden_states, pad_values)
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]:
if not always_partition:
self.set_shift_and_window_size(input_dimensions)
else:
pass
height, width = input_dimensions
batch_size, _, channels = hidden_states.size()
shortcut = hidden_states
hidden_states = self.layernorm_before(hidden_states)
hidden_states = hidden_states.view(batch_size, height, width, channels)
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
if self.shift_size > 0:
shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_hidden_states = hidden_states
hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype, device=hidden_states_windows.device)
attention_outputs = self.attention(hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions)
attention_output = attention_outputs[0]
attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
if self.shift_size > 0:
attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
attention_windows = shifted_windows
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_windows = attention_windows[:, :height, :width, :].contiguous()
attention_windows = attention_windows.view(batch_size, height * width, channels)
hidden_states = shortcut + self.drop_path(attention_windows)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = hidden_states + self.output(layer_output)
layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
return layer_outputs
|
class SwinLayer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0):
pass
def set_shift_and_window_size(self, input_resolution):
pass
def get_attn_mask(self, height, width, dtype, device):
pass
def maybe_pad(self, hidden_states, height, width):
pass
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 6 | 0 | 24 | 3 | 19 | 1 | 3 | 0.06 | 1 | 10 | 4 | 0 | 5 | 10 | 5 | 15 | 123 | 19 | 98 | 49 | 85 | 6 | 73 | 42 | 67 | 6 | 1 | 3 | 16 |
5,471 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinMaskedImageModelingOutput |
import warnings
import torch
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from dataclasses import dataclass
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Swin masked image model outputs.\n ')
class SwinMaskedImageModelingOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
Masked image modeling (MLM) loss.
reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Reconstructed pixel values.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
reconstruction: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@property
def logits(self):
warnings.warn('logits attribute is deprecated and will be removed in version 5 of Transformers. Please use the reconstruction attribute to retrieve the final output instead.', FutureWarning)
return self.reconstruction
|
@dataclass
@auto_docstring(custom_intro='\n Swin masked image model outputs.\n ')
class SwinMaskedImageModelingOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
Masked image modeling (MLM) loss.
reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Reconstructed pixel values.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
@property
def logits(self):
pass
| 5 | 1 | 7 | 0 | 7 | 0 | 1 | 1.57 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 42 | 6 | 14 | 8 | 11 | 22 | 9 | 7 | 7 | 1 | 1 | 0 | 1 |
5,472 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinModel |
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from torch import nn
from typing import Optional, Union
import torch
@auto_docstring
class SwinModel(SwinPreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
"""
add_pooling_layer (`bool`, *optional*, defaults to `True`):
Whether or not to apply a pooling layer.
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether or not to create and apply mask tokens in the embedding layer.
"""
super().__init__(config)
self.config = config
self.num_layers = len(config.depths)
self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
self.embeddings = SwinEmbeddings(config, use_mask_token=use_mask_token)
self.encoder = SwinEncoder(config, self.embeddings.patch_grid)
self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. `heads_to_prune` is a dict of {layer_num: list of heads to prune in this layer}. See the
base class `PreTrainedModel`.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, SwinModelOutput]:
"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
head_mask = self.get_head_mask(head_mask, len(self.config.depths))
embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding)
encoder_outputs = self.encoder(embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output.transpose(1, 2))
pooled_output = torch.flatten(pooled_output, 1)
if not return_dict:
output = (sequence_output, pooled_output) + encoder_outputs[1:]
return output
return SwinModelOutput(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states)
|
@auto_docstring
class SwinModel(SwinPreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
'''
add_pooling_layer (`bool`, *optional*, defaults to `True`):
Whether or not to apply a pooling layer.
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether or not to create and apply mask tokens in the embedding layer.
'''
pass
def get_input_embeddings(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. `heads_to_prune` is a dict of {layer_num: list of heads to prune in this layer}. See the
base class `PreTrainedModel`.
'''
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, SwinModelOutput]:
'''
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
'''
pass
| 7 | 3 | 22 | 3 | 15 | 4 | 3 | 0.2 | 1 | 7 | 3 | 0 | 4 | 7 | 4 | 5 | 98 | 15 | 69 | 28 | 47 | 14 | 35 | 18 | 30 | 7 | 2 | 1 | 12 |
5,473 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinModelOutput |
import torch
from typing import Optional, Union
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, logging, torch_int
@dataclass
@auto_docstring(custom_intro="\n Swin model's outputs that also contains a pooling of the last hidden states.\n ")
class SwinModelOutput(ModelOutput):
"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Swin model's outputs that also contains a pooling of the last hidden states.\n ")
class SwinModelOutput(ModelOutput):
'''
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 3.67 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 5 | 6 | 6 | 5 | 22 | 6 | 6 | 5 | 0 | 1 | 0 | 0 |
5,474 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinOutput |
import torch
from torch import nn
class SwinOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class SwinOutput(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 12 | 10 | 1 | 9 | 5 | 6 | 0 | 9 | 5 | 6 | 1 | 1 | 0 | 2 |
5,475 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinPatchEmbeddings |
import collections.abc
from torch import nn
from typing import Optional, Union
import torch
class SwinPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = (config.image_size, config.patch_size)
num_channels, hidden_size = (config.num_channels, config.embed_dim)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def maybe_pad(self, pixel_values, height, width):
if width % self.patch_size[1] != 0:
pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
pixel_values = nn.functional.pad(pixel_values, pad_values)
if height % self.patch_size[0] != 0:
pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
pixel_values = nn.functional.pad(pixel_values, pad_values)
return pixel_values
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
_, num_channels, height, width = pixel_values.shape
pixel_values = self.maybe_pad(pixel_values, height, width)
embeddings = self.projection(pixel_values)
_, _, height, width = embeddings.shape
output_dimensions = (height, width)
embeddings = embeddings.flatten(2).transpose(1, 2)
return (embeddings, output_dimensions)
|
class SwinPatchEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config):
pass
def maybe_pad(self, pixel_values, height, width):
pass
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
pass
| 4 | 1 | 11 | 1 | 10 | 0 | 2 | 0.2 | 1 | 4 | 0 | 0 | 3 | 6 | 3 | 13 | 41 | 5 | 30 | 17 | 26 | 6 | 30 | 17 | 26 | 3 | 1 | 1 | 7 |
5,476 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinPatchMerging |
from torch import nn
import torch
class SwinPatchMerging(nn.Module):
"""
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = height % 2 == 1 or width % 2 == 1
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
height, width = input_dimensions
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
input_feature = self.maybe_pad(input_feature, height, width)
input_feature_0 = input_feature[:, 0::2, 0::2, :]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels)
input_feature = self.norm(input_feature)
input_feature = self.reduction(input_feature)
return input_feature
|
class SwinPatchMerging(nn.Module):
'''
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
'''
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
pass
def maybe_pad(self, input_feature, height, width):
pass
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
pass
| 4 | 1 | 12 | 1 | 9 | 3 | 1 | 0.67 | 1 | 3 | 0 | 0 | 3 | 4 | 3 | 13 | 52 | 8 | 27 | 16 | 23 | 18 | 27 | 16 | 23 | 2 | 1 | 1 | 4 |
5,477 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinPreTrainedModel |
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from .configuration_swin import SwinConfig
from torch import nn
from ...modeling_utils import PreTrainedModel
@auto_docstring
class SwinPreTrainedModel(PreTrainedModel):
config: SwinConfig
base_model_prefix = 'swin'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = ['SwinStage']
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, SwinEmbeddings):
if module.mask_token is not None:
module.mask_token.data.zero_()
if module.position_embeddings is not None:
module.position_embeddings.data.zero_()
elif isinstance(module, SwinSelfAttention):
module.relative_position_bias_table.data.zero_()
|
@auto_docstring
class SwinPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3 | 1 | 11 | 0 | 8 | 3 | 4 | 0.5 | 1 | 0 | 0 | 4 | 1 | 0 | 1 | 1 | 23 | 2 | 14 | 7 | 12 | 7 | 13 | 7 | 11 | 4 | 1 | 2 | 4 |
5,478 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py | transformers.models.swin.modeling_swin.SwinSelfAttention |
import collections.abc
import math
from torch import nn
from typing import Optional, Union
import torch
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
class SwinSelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
if dim % num_heads != 0:
raise ValueError(f'The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})')
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.window_size = window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads))
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(meshgrid([coords_h, coords_w], indexing='ij'))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.register_buffer('relative_position_index', relative_position_index)
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
batch_size, dim, num_channels = hidden_states.shape
hidden_shape = (batch_size, dim, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
relative_position_bias = relative_position_bias.view(self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
if attention_mask is not None:
mask_shape = attention_mask.shape[0]
attention_scores = attention_scores.view(batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim)
attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
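To make the relative-position bookkeeping in `__init__` concrete, here is a hand-worked sketch (not from the source; it calls `torch.meshgrid` directly rather than the `pytorch_utils` wrapper) for a 2x2 window:
```python
import torch

# For window_size=(2, 2): 4 tokens per window, bias table of (2*2-1)*(2*2-1) = 9 rows.
window = (2, 2)
coords = torch.stack(torch.meshgrid(torch.arange(window[0]), torch.arange(window[1]), indexing="ij"))
coords_flatten = torch.flatten(coords, 1)                       # (2, 4)
relative = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative = relative.permute(1, 2, 0).contiguous()               # (4, 4, 2), offsets in [-1, 1]
relative[:, :, 0] += window[0] - 1                              # shift offsets to [0, 2]
relative[:, :, 1] += window[1] - 1
relative[:, :, 0] *= 2 * window[1] - 1                          # row-major mix of the two offsets
index = relative.sum(-1)                                        # (4, 4) with values 0..8
assert index.min().item() == 0 and index.max().item() == 8
```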
|
class SwinSelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| total_program_units: 3, total_doc_str: 0
| AvgCountLine: 32, AvgCountLineBlank: 6, AvgCountLineCode: 24, AvgCountLineComment: 2, AvgCyclomatic: 3, CommentToCodeRatio: 0.1
| CountClassBase: 1, CountClassCoupled: 6, CountClassCoupledModified: 0, CountClassDerived: 0
| CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 9, CountDeclMethod: 3, CountDeclMethodAll: 13
| CountLine: 98, CountLineBlank: 19, CountLineCode: 72, CountLineCodeDecl: 38, CountLineCodeExe: 62, CountLineComment: 7
| CountStmt: 56, CountStmtDecl: 32, CountStmtExe: 52
| MaxCyclomatic: 4, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 8
|
5,479
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py
|
transformers.models.swin.modeling_swin.SwinSelfOutput
|
import torch
from torch import nn
class SwinSelfOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class SwinSelfOutput(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3, total_doc_str: 0
| AvgCountLine: 5, AvgCountLineBlank: 1, AvgCountLineCode: 4, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
| CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
| CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
| CountLine: 11, CountLineBlank: 2, CountLineCode: 9, CountLineCodeDecl: 5, CountLineCodeExe: 6, CountLineComment: 0
| CountStmt: 9, CountStmtDecl: 5, CountStmtExe: 6
| MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
|
5,480
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin/modeling_swin.py
|
transformers.models.swin.modeling_swin.SwinStage
|
from torch import nn
from ...modeling_layers import GradientCheckpointingLayer
import torch
from typing import Optional, Union
class SwinStage(GradientCheckpointingLayer):
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
super().__init__()
self.config = config
self.dim = dim
self.blocks = nn.ModuleList([SwinLayer(config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, drop_path_rate=drop_path[i], shift_size=0 if i % 2 == 0 else config.window_size // 2) for i in range(depth)])
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
else:
self.downsample = None
self.pointing = False
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> tuple[torch.Tensor]:
height, width = input_dimensions
for i, layer_module in enumerate(self.blocks):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = hidden_states
if self.downsample is not None:
height_downsampled, width_downsampled = ((height + 1) // 2, (width + 1) // 2)
output_dimensions = (height, width, height_downsampled, width_downsampled)
hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
else:
output_dimensions = (height, width, height, width)
stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
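A quick hand-worked check (illustrative, not from the source) of the downsampling arithmetic in the forward pass:
```python
# With an odd 7x7 resolution and a downsample layer:
height, width = 7, 7
height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
output_dimensions = (height, width, height_downsampled, width_downsampled)
assert output_dimensions == (7, 7, 4, 4)  # ceil-division keeps the odd edge row/column
```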
|
class SwinStage(GradientCheckpointingLayer):
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
pass
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| total_program_units: 3, total_doc_str: 0
| AvgCountLine: 28, AvgCountLineBlank: 4, AvgCountLineCode: 24, AvgCountLineComment: 1, AvgCyclomatic: 4, CommentToCodeRatio: 0.02
| CountClassBase: 1, CountClassCoupled: 7, CountClassCoupledModified: 1, CountClassDerived: 0
| CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 5, CountDeclMethod: 2, CountDeclMethodAll: 12
| CountLine: 58, CountLineBlank: 8, CountLineCode: 49, CountLineCodeDecl: 23, CountLineCodeExe: 39, CountLineComment: 1
| CountStmt: 26, CountStmtDecl: 16, CountStmtExe: 23
| MaxCyclomatic: 5, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 8
|
5,481
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/configuration_swin2sr.py
|
transformers.models.swin2sr.configuration_swin2sr.Swin2SRConfig
|
from ...configuration_utils import PretrainedConfig
class Swin2SRConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Swin2SRModel`]. It is used to instantiate a Swin
Transformer v2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Swin Transformer v2
[caidas/swin2sr-classicalsr-x2-64](https://huggingface.co/caidas/swin2sr-classicalsr-x2-64) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 64):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 1):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_channels_out (`int`, *optional*, defaults to `num_channels`):
The number of output channels. If not set, it will be set to `num_channels`.
embed_dim (`int`, *optional*, defaults to 180):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[6, 6, 6, 6, 6, 6]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[6, 6, 6, 6, 6, 6]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 8):
Size of windows.
mlp_ratio (`float`, *optional*, defaults to 2.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
upscale (`int`, *optional*, defaults to 2):
The upscale factor for the image. 2/3/4/8 for image super resolution, 1 for denoising and compression
artifact reduction.
img_range (`float`, *optional*, defaults to 1.0):
The range of the values of the input image.
resi_connection (`str`, *optional*, defaults to `"1conv"`):
The convolutional block to use before the residual connection in each stage.
upsampler (`str`, *optional*, defaults to `"pixelshuffle"`):
The reconstruction module. Can be 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None.
Example:
```python
>>> from transformers import Swin2SRConfig, Swin2SRModel
>>> # Initializing a Swin2SR caidas/swin2sr-classicalsr-x2-64 style configuration
>>> configuration = Swin2SRConfig()
>>> # Initializing a model (with random weights) from the caidas/swin2sr-classicalsr-x2-64 style configuration
>>> model = Swin2SRModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'swin2sr'
attribute_map = {'hidden_size': 'embed_dim', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, image_size=64, patch_size=1, num_channels=3, num_channels_out=None, embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-05, upscale=2, img_range=1.0, resi_connection='1conv', upsampler='pixelshuffle', **kwargs):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_channels_out = num_channels if num_channels_out is None else num_channels_out
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.upscale = upscale
self.img_range = img_range
self.resi_connection = resi_connection
self.upsampler = upsampler
|
class Swin2SRConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Swin2SRModel`]. It is used to instantiate a Swin
Transformer v2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Swin Transformer v2
[caidas/swin2sr-classicalsr-x2-64](https://huggingface.co/caidas/swin2sr-classicalsr-x2-64) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 64):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 1):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_channels_out (`int`, *optional*, defaults to `num_channels`):
The number of output channels. If not set, it will be set to `num_channels`.
embed_dim (`int`, *optional*, defaults to 180):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[6, 6, 6, 6, 6, 6]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[6, 6, 6, 6, 6, 6]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 8):
Size of windows.
mlp_ratio (`float`, *optional*, defaults to 2.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
upscale (`int`, *optional*, defaults to 2):
The upscale factor for the image. 2/3/4/8 for image super resolution, 1 for denoising and compression
artifact reduction.
img_range (`float`, *optional*, defaults to 1.0):
The range of the values of the input image.
resi_connection (`str`, *optional*, defaults to `"1conv"`):
The convolutional block to use before the residual connection in each stage.
upsampler (`str`, *optional*, defaults to `"pixelshuffle"`):
The reconstruction module. Can be 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None.
Example:
```python
>>> from transformers import Swin2SRConfig, Swin2SRModel
>>> # Initializing a Swin2SR caidas/swin2sr-classicalsr-x2-64 style configuration
>>> configuration = Swin2SRConfig()
>>> # Initializing a model (with random weights) from the caidas/swin2sr-classicalsr-x2-64 style configuration
>>> model = Swin2SRModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, image_size=64, patch_size=1, num_channels=3, num_channels_out=None, embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-05, upscale=2, img_range=1.0, resi_connection='1conv', upsampler='pixelshuffle', **kwargs):
pass
| total_program_units: 2, total_doc_str: 1
| AvgCountLine: 49, AvgCountLineBlank: 1, AvgCountLineCode: 48, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 1.13
| CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0
| CountDeclInstanceMethod: 1, CountDeclInstanceVariable: 22, CountDeclMethod: 1, CountDeclMethodAll: 1
| CountLine: 128, CountLineBlank: 11, CountLineCode: 55, CountLineCodeDecl: 50, CountLineCodeExe: 29, CountLineComment: 62
| CountStmt: 27, CountStmtDecl: 26, CountStmtExe: 25
| MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
|
5,482
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/image_processing_swin2sr.py
|
transformers.models.swin2sr.image_processing_swin2sr.Swin2SRImageProcessor
|
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...utils import TensorType, filter_out_non_signature_kwargs, logging
import numpy as np
from typing import Optional, Union
from ...image_transforms import get_image_size, pad, to_channel_dimension_format
from ...utils.deprecation import deprecate_kwarg
from ...image_utils import ChannelDimension, ImageInput, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
class Swin2SRImageProcessor(BaseImageProcessor):
"""
Constructs a Swin2SR image processor.
Args:
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
"""
model_input_names = ['pixel_values']
def __init__(self, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_pad: bool=True, size_divisor: int=8, **kwargs) -> None:
super().__init__(**kwargs)
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
pad_size = kwargs.get('pad_size')
self.size_divisor = size_divisor if size_divisor is not None else pad_size
@property
def pad_size(self):
logger.warning('`self.pad_size` attribute is deprecated and will be removed in v5. Use `self.size_divisor` instead')
return self.size_divisor
@pad_size.setter
def pad_size(self, value):
logger.warning('`self.pad_size` attribute is deprecated and will be removed in v5. Use `self.size_divisor` instead')
self.size_divisor = value
def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Pad an image to make the height and width divisible by `size`.
Args:
image (`np.ndarray`):
Image to pad.
size (`int`):
The size to make the height and width divisible by.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
Returns:
`np.ndarray`: The padded image.
"""
old_height, old_width = get_image_size(image, input_data_format)
pad_height = (old_height // size + 1) * size - old_height
pad_width = (old_width // size + 1) * size - old_width
return pad(image, ((0, pad_height), (0, pad_width)), mode='symmetric', data_format=data_format, input_data_format=input_data_format)
@filter_out_non_signature_kwargs()
@deprecate_kwarg('pad_size', version='v5', new_name='size_divisor')
def preprocess(self, images: ImageInput, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_pad: Optional[bool]=None, size_divisor: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to make the height and width divisible by `window_size`.
size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
The value the image height and width are padded to be divisible by (typically the local attention window size).
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_pad = do_pad if do_pad is not None else self.do_pad
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor)
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
if do_pad:
images = [self.pad(image, size=size_divisor, input_data_format=input_data_format) for image in images]
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data, tensor_type=return_tensors)
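A minimal usage sketch of the `pad` method above (array sizes are illustrative; assumes the class is importable as defined here):
```python
import numpy as np

processor = Swin2SRImageProcessor()
image = np.random.rand(5, 6, 1)                                 # (height, width, channels)
padded = processor.pad(image, size=8, input_data_format="channels_last")
# pad_height = (5 // 8 + 1) * 8 - 5 = 3 and pad_width = 2, so:
print(padded.shape)                                             # (8, 8, 1)
```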
|
class Swin2SRImageProcessor(BaseImageProcessor):
'''
Constructs a Swin2SR image processor.
Args:
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
'''
def __init__(self, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_pad: bool=True, size_divisor: int=8, **kwargs) -> None:
pass
@property
def pad_size(self):
pass
@pad_size.setter
def pad_size(self, value):
pass
def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Pad an image to make the height and width divisible by `size`.
Args:
image (`np.ndarray`):
Image to pad.
size (`int`):
The size to make the height and width divisible by.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
Returns:
`np.ndarray`: The padded image.
'''
pass
@filter_out_non_signature_kwargs()
@deprecate_kwarg('pad_size', version='v5', new_name='size_divisor')
def preprocess(self, images: ImageInput, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_pad: Optional[bool]=None, size_divisor: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to make the height and width divisible by `window_size`.
size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
The value the image height and width are padded to be divisible by (typically the local attention window size).
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| total_program_units: 10, total_doc_str: 3
| AvgCountLine: 49, AvgCountLineBlank: 5, AvgCountLineCode: 26, AvgCountLineComment: 19, AvgCyclomatic: 4, CommentToCodeRatio: 0.84
| CountClassBase: 1, CountClassCoupled: 8, CountClassCoupledModified: 2, CountClassDerived: 0
| CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 4, CountDeclMethod: 3, CountDeclMethodAll: 23
| CountLine: 165, CountLineBlank: 19, CountLineCode: 80, CountLineCodeDecl: 37, CountLineCodeExe: 52, CountLineComment: 67
| CountStmt: 34, CountStmtDecl: 13, CountStmtExe: 30
| MaxCyclomatic: 10, MaxInheritanceTree: 3, MaxNesting: 1, SumCyclomatic: 12
|
5,483
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.NearestConvUpsampler
|
import torch
from torch import nn
class NearestConvUpsampler(nn.Module):
def __init__(self, config, num_features):
super().__init__()
if config.upscale != 4:
raise ValueError('The nearest+conv upsampler only supports an upscale factor of 4 at the moment.')
self.conv_before_upsample = nn.Conv2d(config.embed_dim, num_features, 3, 1, 1)
self.activation = nn.LeakyReLU(inplace=True)
self.conv_up1 = nn.Conv2d(num_features, num_features, 3, 1, 1)
self.conv_up2 = nn.Conv2d(num_features, num_features, 3, 1, 1)
self.conv_hr = nn.Conv2d(num_features, num_features, 3, 1, 1)
self.final_convolution = nn.Conv2d(num_features, config.num_channels_out, 3, 1, 1)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, sequence_output):
sequence_output = self.conv_before_upsample(sequence_output)
sequence_output = self.activation(sequence_output)
sequence_output = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(sequence_output, scale_factor=2, mode='nearest')))
sequence_output = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(sequence_output, scale_factor=2, mode='nearest')))
reconstruction = self.final_convolution(self.lrelu(self.conv_hr(sequence_output)))
return reconstruction
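Usage sketch (dummy tensor sizes; upscale=4 is the only factor this module supports, and `Swin2SRConfig` defaults give embed_dim=180 and num_channels_out=3):
```python
import torch

config = Swin2SRConfig(upscale=4)
upsampler = NearestConvUpsampler(config, num_features=64)
out = upsampler(torch.randn(1, config.embed_dim, 16, 16))
print(out.shape)  # torch.Size([1, 3, 64, 64]): two nearest x2 steps give the total x4 upscale
```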
|
class NearestConvUpsampler(nn.Module):
def __init__(self, config, num_features):
pass
def forward(self, sequence_output):
pass
| total_program_units: 3, total_doc_str: 0
| AvgCountLine: 12, AvgCountLineBlank: 1, AvgCountLineCode: 11, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 0
| CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
| CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 7, CountDeclMethod: 2, CountDeclMethodAll: 12
| CountLine: 25, CountLineBlank: 2, CountLineCode: 23, CountLineCodeDecl: 11, CountLineCodeExe: 20, CountLineComment: 0
| CountStmt: 19, CountStmtDecl: 11, CountStmtExe: 16
| MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
|
5,484
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.PixelShuffleAuxUpsampler
|
from torch import nn
class PixelShuffleAuxUpsampler(nn.Module):
def __init__(self, config, num_features):
super().__init__()
self.upscale = config.upscale
self.conv_bicubic = nn.Conv2d(config.num_channels, num_features, 3, 1, 1)
self.conv_before_upsample = nn.Conv2d(config.embed_dim, num_features, 3, 1, 1)
self.activation = nn.LeakyReLU(inplace=True)
self.conv_aux = nn.Conv2d(num_features, config.num_channels, 3, 1, 1)
self.conv_after_aux = nn.Sequential(nn.Conv2d(3, num_features, 3, 1, 1), nn.LeakyReLU(inplace=True))
self.upsample = Upsample(config.upscale, num_features)
self.final_convolution = nn.Conv2d(num_features, config.num_channels_out, 3, 1, 1)
def forward(self, sequence_output, bicubic, height, width):
bicubic = self.conv_bicubic(bicubic)
sequence_output = self.conv_before_upsample(sequence_output)
sequence_output = self.activation(sequence_output)
aux = self.conv_aux(sequence_output)
sequence_output = self.conv_after_aux(aux)
sequence_output = self.upsample(sequence_output)[:, :, :height * self.upscale, :width * self.upscale] + bicubic[:, :, :height * self.upscale, :width * self.upscale]
reconstruction = self.final_convolution(sequence_output)
return (reconstruction, aux)
|
class PixelShuffleAuxUpsampler(nn.Module):
def __init__(self, config, num_features):
pass
def forward(self, sequence_output, bicubic, height, width):
pass
| total_program_units: 3, total_doc_str: 0
| AvgCountLine: 12, AvgCountLineBlank: 1, AvgCountLineCode: 11, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
| CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 1, CountClassDerived: 0
| CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 8, CountDeclMethod: 2, CountDeclMethodAll: 12
| CountLine: 26, CountLineBlank: 3, CountLineCode: 23, CountLineCodeDecl: 13, CountLineCodeExe: 20, CountLineComment: 0
| CountStmt: 20, CountStmtDecl: 13, CountStmtExe: 17
| MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
|
5,485
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.PixelShuffleUpsampler
|
from torch import nn
class PixelShuffleUpsampler(nn.Module):
def __init__(self, config, num_features):
super().__init__()
self.conv_before_upsample = nn.Conv2d(config.embed_dim, num_features, 3, 1, 1)
self.activation = nn.LeakyReLU(inplace=True)
self.upsample = Upsample(config.upscale, num_features)
self.final_convolution = nn.Conv2d(num_features, config.num_channels_out, 3, 1, 1)
def forward(self, sequence_output):
x = self.conv_before_upsample(sequence_output)
x = self.activation(x)
x = self.upsample(x)
x = self.final_convolution(x)
return x
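The `Upsample` helper is defined elsewhere in the file; as background, a minimal sketch of the standard sub-pixel (pixel-shuffle) pattern such helpers conventionally follow (an assumption, shown for power-of-two scales only; the real helper also has to cover the 3x case):
```python
import math
import torch
from torch import nn

class SubPixelUpsample(nn.Sequential):
    """Conv to 4*C channels, then PixelShuffle(2), repeated log2(scale) times (sketch, not the source)."""
    def __init__(self, scale: int, num_features: int):
        layers = []
        for _ in range(int(math.log2(scale))):
            layers.append(nn.Conv2d(num_features, 4 * num_features, 3, 1, 1))
            layers.append(nn.PixelShuffle(2))  # (N, 4C, H, W) -> (N, C, 2H, 2W)
        super().__init__(*layers)

print(SubPixelUpsample(4, 64)(torch.randn(1, 64, 16, 16)).shape)  # torch.Size([1, 64, 64, 64])
```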
|
class PixelShuffleUpsampler(nn.Module):
def __init__(self, config, num_features):
pass
def forward(self, sequence_output):
pass
| total_program_units: 3, total_doc_str: 0
| AvgCountLine: 7, AvgCountLineBlank: 1, AvgCountLineCode: 6, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
| CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 1, CountClassDerived: 0
| CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 4, CountDeclMethod: 2, CountDeclMethodAll: 12
| CountLine: 15, CountLineBlank: 2, CountLineCode: 13, CountLineCodeDecl: 8, CountLineCodeExe: 10, CountLineComment: 0
| CountStmt: 13, CountStmtDecl: 8, CountStmtExe: 10
| MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
|
5,486
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.Swin2SRAttention
|
from typing import Optional, Union
from torch import nn
import torch
import collections.abc
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
class Swin2SRAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=0):
super().__init__()
self.self = Swin2SRSelfAttention(config=config, dim=dim, num_heads=num_heads, window_size=window_size, pretrained_window_size=pretrained_window_size if isinstance(pretrained_window_size, collections.abc.Iterable) else (pretrained_window_size, pretrained_window_size))
self.output = Swin2SRSelfOutput(config, dim)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
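Usage sketch for `prune_heads` (assumes the default `Swin2SRConfig` and that `Swin2SRSelfAttention`, not shown in this excerpt, exposes the same `query`/`key`/`value` and head-count attributes as the Swin version above; head size is 180 / 6 = 30):
```python
config = Swin2SRConfig()
attention = Swin2SRAttention(config, dim=180, num_heads=6, window_size=8)
attention.prune_heads({0})
print(attention.self.num_attention_heads)   # 5
print(attention.self.query.weight.shape)    # torch.Size([150, 180]) after removing one 30-dim head
```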
|
class Swin2SRAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=0):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| total_program_units: 4, total_doc_str: 0
| AvgCountLine: 14, AvgCountLineBlank: 1, AvgCountLineCode: 12, AvgCountLineComment: 1, AvgCyclomatic: 2, CommentToCodeRatio: 0.08
| CountClassBase: 1, CountClassCoupled: 7, CountClassCoupledModified: 2, CountClassDerived: 0
| CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 3, CountDeclMethod: 3, CountDeclMethodAll: 13
| CountLine: 44, CountLineBlank: 4, CountLineCode: 38, CountLineCodeDecl: 17, CountLineCodeExe: 28, CountLineComment: 3
| CountStmt: 22, CountStmtDecl: 11, CountStmtExe: 18
| MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 5
|
5,487
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.Swin2SRDropPath
|
import torch
from typing import Optional, Union
from torch import nn
class Swin2SRDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
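The `drop_path` function called above is defined elsewhere in the file; a minimal sketch of the conventional stochastic-depth implementation it follows (an assumption, since the helper is not shown in this excerpt):
```python
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Sketch of the standard helper, not the source definition.
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)   # one Bernoulli draw per sample
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()                                # binarize to 0/1
    return input.div(keep_prob) * random_tensor           # rescale the kept residual paths
```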
|
class Swin2SRDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| total_program_units: 4, total_doc_str: 1
| AvgCountLine: 2, AvgCountLineBlank: 0, AvgCountLineCode: 2, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0.13
| CountClassBase: 1, CountClassCoupled: 4, CountClassCoupledModified: 0, CountClassDerived: 0
| CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 1, CountDeclMethod: 3, CountDeclMethodAll: 13
| CountLine: 12, CountLineBlank: 3, CountLineCode: 8, CountLineCodeDecl: 5, CountLineCodeExe: 4, CountLineComment: 1
| CountStmt: 8, CountStmtDecl: 5, CountStmtExe: 4
| MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 3
|
5,488
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.Swin2SREmbeddings
|
from torch import nn
from typing import Optional, Union
import torch
class Swin2SREmbeddings(nn.Module):
"""
Construct the patch and optional position embeddings.
"""
def __init__(self, config):
super().__init__()
self.patch_embeddings = Swin2SRPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
if config.use_absolute_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
else:
self.position_embeddings = None
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.window_size = config.window_size
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor]:
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
if self.position_embeddings is not None:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return (embeddings, output_dimensions)
|
class Swin2SREmbeddings(nn.Module):
'''
Construct the patch and optional position embeddings.
'''
def __init__(self, config):
pass
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor]:
pass
| total_program_units: 3, total_doc_str: 1
| AvgCountLine: 11, AvgCountLineBlank: 3, AvgCountLineCode: 8, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 0.18
| CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 1, CountClassDerived: 0
| CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 4, CountDeclMethod: 2, CountDeclMethodAll: 12
| CountLine: 28, CountLineBlank: 8, CountLineCode: 17, CountLineCodeDecl: 9, CountLineCodeExe: 14, CountLineComment: 3
| CountStmt: 16, CountStmtDecl: 9, CountStmtExe: 13
| MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 4
|
5,489
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.Swin2SREncoder
|
from typing import Optional, Union
import torch
from torch import nn
class Swin2SREncoder(nn.Module):
def __init__(self, config, grid_size):
super().__init__()
self.num_stages = len(config.depths)
self.config = config
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu')]
self.stages = nn.ModuleList([Swin2SRStage(config=config, dim=config.embed_dim, input_resolution=(grid_size[0], grid_size[1]), depth=config.depths[stage_idx], num_heads=config.num_heads[stage_idx], drop_path=dpr[sum(config.depths[:stage_idx]):sum(config.depths[:stage_idx + 1])], pretrained_window_size=0) for stage_idx in range(self.num_stages)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, Swin2SREncoderOutput]:
all_input_dimensions = ()
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
all_hidden_states += (hidden_states,)
for i, stage_module in enumerate(self.stages):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = stage_module(hidden_states, input_dimensions, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0]
output_dimensions = layer_outputs[1]
input_dimensions = (output_dimensions[-2], output_dimensions[-1])
all_input_dimensions += (input_dimensions,)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if output_attentions:
all_self_attentions += layer_outputs[2:]
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return Swin2SREncoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
|
class Swin2SREncoder(nn.Module):
def __init__(self, config, grid_size):
pass
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, Swin2SREncoderOutput]:
pass
| total_program_units: 3, total_doc_str: 0
| AvgCountLine: 34, AvgCountLineBlank: 5, AvgCountLineCode: 29, AvgCountLineComment: 0, AvgCyclomatic: 6, CommentToCodeRatio: 0
| CountClassBase: 1, CountClassCoupled: 9, CountClassCoupledModified: 2, CountClassDerived: 0
| CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 4, CountDeclMethod: 2, CountDeclMethodAll: 12
| CountLine: 69, CountLineBlank: 11, CountLineCode: 58, CountLineCodeDecl: 23, CountLineCodeExe: 47, CountLineComment: 0
| CountStmt: 30, CountStmtDecl: 15, CountStmtExe: 27
| MaxCyclomatic: 10, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 11
|
5,490
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.Swin2SREncoderOutput
|
from dataclasses import dataclass
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
import torch
@dataclass
@auto_docstring(custom_intro="\n Swin2SR encoder's outputs, with potential hidden states and attentions.\n ")
class Swin2SREncoderOutput(ModelOutput):
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Swin2SR encoder's outputs, with potential hidden states and attentions.\n ")
class Swin2SREncoderOutput(ModelOutput):
pass
| total_program_units: 3, total_doc_str: 0
| AvgCountLine: 0, AvgCountLineBlank: 0, AvgCountLineCode: 0, AvgCountLineComment: 0, AvgCyclomatic: 0, CommentToCodeRatio: 3.75
| CountClassBase: 1, CountClassCoupled: 0, CountClassCoupledModified: 0, CountClassDerived: 0
| CountDeclInstanceMethod: 0, CountDeclInstanceVariable: 0, CountDeclMethod: 0, CountDeclMethodAll: 0
| CountLine: 23, CountLineBlank: 4, CountLineCode: 4, CountLineCodeDecl: 4, CountLineCodeExe: 3, CountLineComment: 15
| CountStmt: 4, CountStmtDecl: 4, CountStmtExe: 3
| MaxCyclomatic: 0, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 0
|
5,491
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.Swin2SRForImageSuperResolution
|
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_outputs import BaseModelOutput, ImageSuperResolutionOutput
from typing import Optional, Union
import torch
from torch import nn
@auto_docstring(custom_intro='\n Swin2SR Model transformer with an upsampler head on top for image super resolution and restoration.\n ')
class Swin2SRForImageSuperResolution(Swin2SRPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.swin2sr = Swin2SRModel(config)
self.upsampler = config.upsampler
self.upscale = config.upscale
num_features = 64
if self.upsampler == 'pixelshuffle':
self.upsample = PixelShuffleUpsampler(config, num_features)
elif self.upsampler == 'pixelshuffle_aux':
self.upsample = PixelShuffleAuxUpsampler(config, num_features)
elif self.upsampler == 'pixelshuffledirect':
self.upsample = UpsampleOneStep(config.upscale, config.embed_dim, config.num_channels_out)
elif self.upsampler == 'nearest+conv':
self.upsample = NearestConvUpsampler(config, num_features)
else:
self.final_convolution = nn.Conv2d(config.embed_dim, config.num_channels_out, 3, 1, 1)
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageSuperResolutionOutput]:
"""
Example:
```python
>>> import torch
>>> import numpy as np
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoImageProcessor, Swin2SRForImageSuperResolution
>>> processor = AutoImageProcessor.from_pretrained("caidas/swin2SR-classical-sr-x2-64")
>>> model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64")
>>> url = "https://huggingface.co/spaces/jjourney1125/swin2sr/resolve/main/samples/butterfly.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> # prepare image for the model
>>> inputs = processor(image, return_tensors="pt")
>>> # forward pass
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> output = outputs.reconstruction.data.squeeze().float().cpu().clamp_(0, 1).numpy()
>>> output = np.moveaxis(output, source=0, destination=-1)
>>> output = (output * 255.0).round().astype(np.uint8) # float32 to uint8
>>> # you can visualize `output` with `Image.fromarray`
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
loss = None
if labels is not None:
raise NotImplementedError('Training is not supported at the moment')
height, width = pixel_values.shape[2:]
if self.config.upsampler == 'pixelshuffle_aux':
bicubic = nn.functional.interpolate(pixel_values, size=(height * self.upscale, width * self.upscale), mode='bicubic', align_corners=False)
outputs = self.swin2sr(pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
if self.upsampler in ['pixelshuffle', 'pixelshuffledirect', 'nearest+conv']:
reconstruction = self.upsample(sequence_output)
elif self.upsampler == 'pixelshuffle_aux':
reconstruction, aux = self.upsample(sequence_output, bicubic, height, width)
aux = aux / self.swin2sr.img_range + self.swin2sr.mean
else:
reconstruction = pixel_values + self.final_convolution(sequence_output)
reconstruction = reconstruction / self.swin2sr.img_range + self.swin2sr.mean
reconstruction = reconstruction[:, :, :height * self.upscale, :width * self.upscale]
if not return_dict:
output = (reconstruction,) + outputs[1:]
return (loss,) + output if loss is not None else output
return ImageSuperResolutionOutput(loss=loss, reconstruction=reconstruction, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Swin2SR Model transformer with an upsampler head on top for image super resolution and restoration.\n ')
class Swin2SRForImageSuperResolution(Swin2SRPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageSuperResolutionOutput]:
'''
Example:
```python
>>> import torch
>>> import numpy as np
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoImageProcessor, Swin2SRForImageSuperResolution
>>> processor = AutoImageProcessor.from_pretrained("caidas/swin2SR-classical-sr-x2-64")
>>> model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64")
>>> url = "https://huggingface.co/spaces/jjourney1125/swin2sr/resolve/main/samples/butterfly.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> # prepare image for the model
>>> inputs = processor(image, return_tensors="pt")
>>> # forward pass
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> output = outputs.reconstruction.data.squeeze().float().cpu().clamp_(0, 1).numpy()
>>> output = np.moveaxis(output, source=0, destination=-1)
>>> output = (output * 255.0).round().astype(np.uint8) # float32 to uint8
>>> # you can visualize `output` with `Image.fromarray`
```'''
pass
| total_program_units: 5, total_doc_str: 1
| AvgCountLine: 55, AvgCountLineBlank: 9, AvgCountLineCode: 32, AvgCountLineComment: 14, AvgCyclomatic: 7, CommentToCodeRatio: 0.42
| CountClassBase: 1, CountClassCoupled: 9, CountClassCoupledModified: 6, CountClassDerived: 0
| CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 5, CountDeclMethod: 2, CountDeclMethodAll: 3
| CountLine: 114, CountLineBlank: 19, CountLineCode: 67, CountLineCodeDecl: 26, CountLineCodeExe: 54, CountLineComment: 28
| CountStmt: 35, CountStmtDecl: 17, CountStmtExe: 32
| MaxCyclomatic: 8, MaxInheritanceTree: 2, MaxNesting: 1, SumCyclomatic: 13
|
5,492
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.Swin2SRIntermediate
|
import torch
from ...activations import ACT2FN
from torch import nn
class Swin2SRIntermediate(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class Swin2SRIntermediate(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3, total_doc_str: 0
| AvgCountLine: 6, AvgCountLineBlank: 0, AvgCountLineCode: 6, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 0
| CountClassBase: 1, CountClassCoupled: 4, CountClassCoupledModified: 0, CountClassDerived: 0
| CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
| CountLine: 13, CountLineBlank: 1, CountLineCode: 12, CountLineCodeDecl: 5, CountLineCodeExe: 9, CountLineComment: 0
| CountStmt: 11, CountStmtDecl: 5, CountStmtExe: 8
| MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
|
5,493
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.Swin2SRLayer
|
from torch import nn
from typing import Optional, Union
import torch
import collections.abc
class Swin2SRLayer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0, pretrained_window_size=0):
super().__init__()
self.input_resolution = input_resolution
window_size, shift_size = self._compute_window_shift((config.window_size, config.window_size), (shift_size, shift_size))
self.window_size = window_size[0]
self.shift_size = shift_size[0]
self.attention = Swin2SRAttention(config=config, dim=dim, num_heads=num_heads, window_size=self.window_size, pretrained_window_size=pretrained_window_size if isinstance(pretrained_window_size, collections.abc.Iterable) else (pretrained_window_size, pretrained_window_size))
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.drop_path = Swin2SRDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.intermediate = Swin2SRIntermediate(config, dim)
self.output = Swin2SROutput(config, dim)
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
def _compute_window_shift(self, target_window_size, target_shift_size) -> tuple[tuple[int, int], tuple[int, int]]:
window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)]
shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)]
return (window_size, shift_size)
def get_attn_mask(self, height, width, dtype):
if self.shift_size > 0:
img_mask = torch.zeros((1, height, width, 1), dtype=dtype)
height_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None))
width_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None))
count = 0
for height_slice in height_slices:
for width_slice in width_slices:
img_mask[:, height_slice, width_slice, :] = count
count += 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)
else:
attn_mask = None
return attn_mask
def maybe_pad(self, hidden_states, height, width):
pad_right = (self.window_size - width % self.window_size) % self.window_size
pad_bottom = (self.window_size - height % self.window_size) % self.window_size
pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return (hidden_states, pad_values)
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]:
height, width = input_dimensions
batch_size, _, channels = hidden_states.size()
shortcut = hidden_states
hidden_states = hidden_states.view(batch_size, height, width, channels)
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
if self.shift_size > 0:
shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_hidden_states = hidden_states
hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype)
if attn_mask is not None:
attn_mask = attn_mask.to(hidden_states_windows.device)
attention_outputs = self.attention(hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions)
attention_output = attention_outputs[0]
attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
if self.shift_size > 0:
attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
attention_windows = shifted_windows
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_windows = attention_windows[:, :height, :width, :].contiguous()
attention_windows = attention_windows.view(batch_size, height * width, channels)
hidden_states = self.layernorm_before(attention_windows)
hidden_states = shortcut + self.drop_path(hidden_states)
layer_output = self.intermediate(hidden_states)
layer_output = self.output(layer_output)
layer_output = hidden_states + self.drop_path(self.layernorm_after(layer_output))
layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
return layer_outputs
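A hand-worked illustration of `get_attn_mask` (numbers chosen for illustration, not from the source): on an 8x8 padded input with window_size=4 and shift_size=2, the slicing assigns nine region ids, and token pairs from different regions receive a -100 score offset so cross-region attention is suppressed:
```python
import torch

height = width = 8
window_size, shift_size = 4, 2
img_mask = torch.zeros((1, height, width, 1))
slices = (slice(0, -window_size), slice(-window_size, -shift_size), slice(-shift_size, None))
count = 0
for height_slice in slices:
    for width_slice in slices:
        img_mask[:, height_slice, width_slice, :] = count
        count += 1
print(int(img_mask.max().item()) + 1)  # 9 regions; windows mixing regions get pairwise masking
```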
|
class Swin2SRLayer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0, pretrained_window_size=0):
pass
def _compute_window_shift(self, target_window_size, target_shift_size) -> tuple[tuple[int, int], tuple[int, int]]:
pass
def get_attn_mask(self, height, width, dtype):
pass
def maybe_pad(self, hidden_states, height, width):
pass
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]:
pass
| total_program_units: 6, total_doc_str: 0
| AvgCountLine: 24, AvgCountLineBlank: 2, AvgCountLineCode: 20, AvgCountLineComment: 1, AvgCyclomatic: 3, CommentToCodeRatio: 0.05
| CountClassBase: 1, CountClassCoupled: 12, CountClassCoupledModified: 4, CountClassDerived: 0
| CountDeclInstanceMethod: 5, CountDeclInstanceVariable: 9, CountDeclMethod: 5, CountDeclMethodAll: 15
| CountLine: 123, CountLineBlank: 15, CountLineCode: 103, CountLineCodeDecl: 52, CountLineCodeExe: 89, CountLineComment: 5
| CountStmt: 72, CountStmtDecl: 44, CountStmtExe: 66
| MaxCyclomatic: 6, MaxInheritanceTree: 1, MaxNesting: 3, SumCyclomatic: 17
|
5,494
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.Swin2SRModel
|
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_outputs import BaseModelOutput, ImageSuperResolutionOutput
from typing import Optional, Union
import torch
@auto_docstring
class Swin2SRModel(Swin2SRPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
if config.num_channels == 3 and config.num_channels_out == 3:
mean = torch.tensor([0.4488, 0.4371, 0.404]).view(1, 3, 1, 1)
else:
mean = torch.zeros(1, 1, 1, 1)
self.register_buffer('mean', mean, persistent=False)
self.img_range = config.img_range
self.first_convolution = nn.Conv2d(config.num_channels, config.embed_dim, 3, 1, 1)
self.embeddings = Swin2SREmbeddings(config)
self.encoder = Swin2SREncoder(config, grid_size=self.embeddings.patch_embeddings.patches_resolution)
self.layernorm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps)
self.patch_unembed = Swin2SRPatchUnEmbeddings(config)
self.conv_after_body = nn.Conv2d(config.embed_dim, config.embed_dim, 3, 1, 1)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def pad_and_normalize(self, pixel_values):
_, _, height, width = pixel_values.size()
window_size = self.config.window_size
modulo_pad_height = (window_size - height % window_size) % window_size
modulo_pad_width = (window_size - width % window_size) % window_size
pixel_values = nn.functional.pad(pixel_values, (0, modulo_pad_width, 0, modulo_pad_height), 'reflect')
mean = self.mean.type_as(pixel_values)
pixel_values = (pixel_values - mean) * self.img_range
return pixel_values
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
head_mask = self.get_head_mask(head_mask, len(self.config.depths))
_, _, height, width = pixel_values.shape
pixel_values = self.pad_and_normalize(pixel_values)
embeddings = self.first_convolution(pixel_values)
embedding_output, input_dimensions = self.embeddings(embeddings)
encoder_outputs = self.encoder(embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
sequence_output = self.patch_unembed(sequence_output, (height, width))
sequence_output = self.conv_after_body(sequence_output) + embeddings
if not return_dict:
output = (sequence_output,) + encoder_outputs[1:]
return output
return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
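A quick check of the modulo padding in `pad_and_normalize` (illustrative heights, window_size=8):
```python
window_size = 8
for height in (30, 32):
    pad = (window_size - height % window_size) % window_size
    print(height, "->", height + pad)  # 30 -> 32; 32 -> 32 (the outer % avoids padding an extra full window)
```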
|
@auto_docstring
class Swin2SRModel(Swin2SRPreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
class PreTrainedModel.
'''
pass
def pad_and_normalize(self, pixel_values):
pass
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
pass
| total_program_units: 8, total_doc_str: 1
| AvgCountLine: 20, AvgCountLineBlank: 3, AvgCountLineCode: 14, AvgCountLineComment: 3, AvgCyclomatic: 2, CommentToCodeRatio: 0.17
| CountClassBase: 1, CountClassCoupled: 7, CountClassCoupledModified: 4, CountClassDerived: 0
| CountDeclInstanceMethod: 5, CountDeclInstanceVariable: 9, CountDeclMethod: 5, CountDeclMethodAll: 6
| CountLine: 111, CountLineBlank: 21, CountLineCode: 77, CountLineCodeDecl: 35, CountLineCodeExe: 56, CountLineComment: 13
| CountStmt: 48, CountStmtDecl: 27, CountStmtExe: 42
| MaxCyclomatic: 5, MaxInheritanceTree: 2, MaxNesting: 1, SumCyclomatic: 11
|
5,495
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.Swin2SROutput
|
import torch
from torch import nn
class Swin2SROutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class Swin2SROutput(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3, total_doc_str: 0
| AvgCountLine: 4, AvgCountLineBlank: 0, AvgCountLineCode: 4, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
| CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0
| CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
| CountLine: 10, CountLineBlank: 1, CountLineCode: 9, CountLineCodeDecl: 5, CountLineCodeExe: 6, CountLineComment: 0
| CountStmt: 9, CountStmtDecl: 5, CountStmtExe: 6
| MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
|
5,496
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
|
transformers.models.swin2sr.modeling_swin2sr.Swin2SRPatchEmbeddings
|
import collections.abc
from typing import Optional, Union
import torch
from torch import nn
class Swin2SRPatchEmbeddings(nn.Module):
def __init__(self, config, normalize_patches=True):
super().__init__()
num_channels = config.embed_dim
image_size, patch_size = (config.image_size, config.patch_size)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
patches_resolution = [image_size[0] // patch_size[0], image_size[1] // patch_size[1]]
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.projection = nn.Conv2d(num_channels, config.embed_dim, kernel_size=patch_size, stride=patch_size)
self.layernorm = nn.LayerNorm(config.embed_dim) if normalize_patches else None
def forward(self, embeddings: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
embeddings = self.projection(embeddings)
_, _, height, width = embeddings.shape
output_dimensions = (height, width)
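# flatten spatial dims: (batch, channels, height, width) -> (batch, height*width, channels)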
embeddings = embeddings.flatten(2).transpose(1, 2)
if self.layernorm is not None:
embeddings = self.layernorm(embeddings)
return (embeddings, output_dimensions)
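A hedged sketch of the class above: the SimpleNamespace config is illustrative, not the real Swin2SRConfig (which defaults to image_size=64, patch_size=1).

from types import SimpleNamespace
import torch

config = SimpleNamespace(embed_dim=96, image_size=64, patch_size=1)
patch_embed = Swin2SRPatchEmbeddings(config)

feature_map = torch.randn(1, config.embed_dim, 64, 64)   # e.g. output of a prior convolution
tokens, output_dimensions = patch_embed(feature_map)
print(tokens.shape, output_dimensions)  # torch.Size([1, 4096, 96]) (64, 64)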
class_skeleton:
class Swin2SRPatchEmbeddings(nn.Module):
def __init__(self, config, normalize_patches=True):
pass
def forward(self, embeddings: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=12, AvgCountLineBlank=2, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=3, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=25, CountLineBlank=5, CountLineCode=20, CountLineCodeDecl=12, CountLineCodeExe=17, CountLineComment=0, CountStmt=20, CountStmtDecl=12, CountStmtExe=17, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=6
|
id: 5,497
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
class_name: transformers.models.swin2sr.modeling_swin2sr.Swin2SRPatchMerging
human_written_code:
import torch
from torch import nn
class Swin2SRPatchMerging(nn.Module):
"""
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(2 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = height % 2 == 1 or width % 2 == 1
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
height, width = input_dimensions
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
input_feature = self.maybe_pad(input_feature, height, width)
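# gather the four 2x2 neighbors; each slice has shape (batch, height/2, width/2, channels)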
input_feature_0 = input_feature[:, 0::2, 0::2, :]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels)
input_feature = self.reduction(input_feature)
input_feature = self.norm(input_feature)
return input_feature
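A hedged illustration of the 2x2 merge implemented above: spatial resolution halves while the channel dimension doubles; the sizes are arbitrary.

import torch

merge = Swin2SRPatchMerging(input_resolution=(8, 8), dim=96)
tokens = torch.randn(1, 8 * 8, 96)
merged = merge(tokens, (8, 8))
print(merged.shape)  # torch.Size([1, 16, 192]) -- 4x fewer tokens, 2x channels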
class_skeleton:
class Swin2SRPatchMerging(nn.Module):
'''
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
'''
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
pass
def maybe_pad(self, input_feature, height, width):
pass
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
pass
metrics: total_program_units=4, total_doc_str=1, AvgCountLine=12, AvgCountLineBlank=1, AvgCountLineCode=9, AvgCountLineComment=3, AvgCyclomatic=1, CommentToCodeRatio=0.67, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=4, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=52, CountLineBlank=8, CountLineCode=27, CountLineCodeDecl=16, CountLineCodeExe=23, CountLineComment=18, CountStmt=27, CountStmtDecl=16, CountStmtExe=23, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4
|
id: 5,498
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
class_name: transformers.models.swin2sr.modeling_swin2sr.Swin2SRPatchUnEmbeddings
human_written_code:
from torch import nn
class Swin2SRPatchUnEmbeddings(nn.Module):
"""Image to Patch Unembedding"""
def __init__(self, config):
super().__init__()
self.embed_dim = config.embed_dim
def forward(self, embeddings, x_size):
batch_size, height_width, num_channels = embeddings.shape
embeddings = embeddings.transpose(1, 2).view(batch_size, self.embed_dim, x_size[0], x_size[1])
return embeddings
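A hedged sketch of the class above: unembedding reshapes the token sequence back into a (batch, channels, height, width) feature map. The SimpleNamespace config is a stand-in carrying only the embed_dim field the class reads.

from types import SimpleNamespace
import torch

config = SimpleNamespace(embed_dim=96)
unembed = Swin2SRPatchUnEmbeddings(config)

tokens = torch.randn(1, 64 * 64, 96)      # sequence of patch tokens
feature_map = unembed(tokens, (64, 64))
print(feature_map.shape)  # torch.Size([1, 96, 64, 64])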
class_skeleton:
class Swin2SRPatchUnEmbeddings(nn.Module):
'''Image to Patch Unembedding'''
def __init__(self, config):
pass
def forward(self, embeddings, x_size):
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=4, AvgCountLineBlank=1, AvgCountLineCode=4, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.25, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=12, CountLineBlank=3, CountLineCode=8, CountLineCodeDecl=5, CountLineCodeExe=5, CountLineComment=2, CountStmt=8, CountStmtDecl=5, CountStmtExe=5, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
|
id: 5,499
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/swin2sr/modeling_swin2sr.py
class_name: transformers.models.swin2sr.modeling_swin2sr.Swin2SRPreTrainedModel
human_written_code:
from torch import nn
import torch
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, logging
from .configuration_swin2sr import Swin2SRConfig
@auto_docstring
class Swin2SRPreTrainedModel(PreTrainedModel):
config: Swin2SRConfig
base_model_prefix = 'swin2sr'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
torch.nn.init.trunc_normal_(module.weight.data, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
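A hedged sketch, separate from the dataset row: the same truncated-normal initialization that _init_weights applies to Linear and Conv2d modules. initializer_range=0.02 is an illustrative value; the real one comes from Swin2SRConfig.

import torch
from torch import nn

initializer_range = 0.02
module = nn.Linear(180, 180)
torch.nn.init.trunc_normal_(module.weight.data, std=initializer_range)  # samples truncated to [-2, 2] by default
if module.bias is not None:
    module.bias.data.zero_()
print(round(module.weight.std().item(), 4))  # close to 0.02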
class_skeleton:
@auto_docstring
class Swin2SRPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=9, AvgCountLineBlank=0, AvgCountLineCode=8, AvgCountLineComment=1, AvgCyclomatic=4, CommentToCodeRatio=0.38, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=2, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=20, CountLineBlank=2, CountLineCode=13, CountLineCodeDecl=6, CountLineCodeExe=11, CountLineComment=5, CountStmt=12, CountStmtDecl=6, CountStmtExe=10, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=4