Dataset schema (column, dtype, observed min and max; string columns report length ranges, ⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length), ⌀ | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |

Each data row below gives the six identifying/code columns first (id, repository_name, file_path, class_name, human_written_code, class_skeleton), followed by the 29 metric columns in the order listed above.
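As a minimal sketch of how rows in this shape might be consumed (assuming the dump has been exported to a local Parquet file; the file name `olmo_classes.parquet` and the filter thresholds are placeholders, not part of the original data):

```python
# Hypothetical usage sketch: the file path and thresholds below are illustrative only.
import pandas as pd

# Columns follow the schema table above (id, repository_name, ..., SumCyclomatic).
df = pd.read_parquet("olmo_classes.parquet")

# Example slice: small, low-complexity classes that still carry at least one docstring.
small_documented = df[
    (df["CountLineCode"] <= 50)
    & (df["SumCyclomatic"] <= 10)
    & (df["total_doc_str"] > 0)
]
print(small_documented[["repository_name", "class_name", "CountLineCode", "SumCyclomatic"]].head())
```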
4,200 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modeling_olmo.py | transformers.models.olmo.modeling_olmo.OlmoMLP |
import torch.nn as nn
from ...activations import ACT2FN
class OlmoMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]
    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj
|
class OlmoMLP(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, x):
        pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 7 | 2 | 12 | 14 | 1 | 13 | 11 | 10 | 0 | 13 | 11 | 10 | 1 | 1 | 0 | 2 |
4,201 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modeling_olmo.py | transformers.models.olmo.modeling_olmo.OlmoModel |
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
import torch.nn as nn
from ...cache_utils import Cache, DynamicCache
from ...processing_utils import Unpack
import torch.nn.functional as F
from ...masking_utils import create_causal_mask
from .configuration_olmo import OlmoConfig
from ...utils.generic import check_model_inputs
import torch
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from typing import Callable, Optional, Union
@auto_docstring
class OlmoModel(OlmoPreTrainedModel):
def __init__(self, config: OlmoConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([OlmoDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = OlmoLayerNorm(config.hidden_size)
self.rotary_emb = OlmoRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
|
@auto_docstring
class OlmoModel(OlmoPreTrainedModel):
def __init__(self, config: OlmoConfig):
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
pass
| 6 | 0 | 40 | 5 | 30 | 6 | 6 | 0.22 | 1 | 16 | 10 | 1 | 5 | 7 | 6 | 7 | 257 | 34 | 184 | 65 | 146 | 40 | 89 | 34 | 82 | 21 | 2 | 2 | 37 |
4,202 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modeling_olmo.py | transformers.models.olmo.modeling_olmo.OlmoPreTrainedModel |
from .configuration_olmo import OlmoConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
@auto_docstring
class OlmoPreTrainedModel(PreTrainedModel):
    config: OlmoConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['OlmoDecoderLayer']
    _skip_keys_device_placement = ['past_key_values']
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {'hidden_states': OlmoDecoderLayer, 'attentions': OlmoAttention}
|
@auto_docstring
class OlmoPreTrainedModel(PreTrainedModel):
    pass
| 2 | 0 | 10 | 0 | 10 | 0 | 5 | 0 | 1 | 0 | 0 | 2 | 1 | 0 | 1 | 1 | 24 | 1 | 23 | 15 | 21 | 0 | 22 | 15 | 20 | 5 | 1 | 2 | 5 |
4,203 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modeling_olmo.py | transformers.models.olmo.modeling_olmo.OlmoRotaryEmbedding |
import torch.nn.functional as F
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
import torch
from .configuration_olmo import OlmoConfig
import torch.nn as nn
class OlmoRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor
    def __init__(self, config: OlmoConfig, device=None):
        super().__init__()
        if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
        else:
            self.rope_type = 'default'
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer('inv_freq', inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq
    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling
        return (cos, sin)
|
class OlmoRotaryEmbedding(nn.Module):
    def __init__(self, config: OlmoConfig, device=None):
        pass
    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        pass
| 5 | 0 | 18 | 2 | 13 | 5 | 3 | 0.35 | 1 | 4 | 1 | 0 | 3 | 7 | 3 | 13 | 59 | 8 | 40 | 21 | 35 | 14 | 38 | 20 | 34 | 3 | 1 | 1 | 8 |
4,204 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modular_olmo.py | transformers.models.olmo.modular_olmo.OlmoAttention |
import torch.nn as nn
import torch.nn.functional as F
from typing import Callable, Optional
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache
import torch
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaRotaryEmbedding, eager_attention_forward, rotate_half
class OlmoAttention(LlamaAttention):
    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)
        if self.config.clip_qkv is not None:
            query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
            key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
            value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_values is not None:
            cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != 'eager':
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
        attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return (attn_output, attn_weights)
|
class OlmoAttention(LlamaAttention):
    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        pass
| 3 | 0 | 57 | 8 | 48 | 1 | 6 | 0.02 | 1 | 2 | 1 | 0 | 1 | 1 | 1 | 13 | 58 | 8 | 49 | 20 | 39 | 1 | 28 | 11 | 26 | 6 | 2 | 2 | 6 |
4,205 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modular_olmo.py | transformers.models.olmo.modular_olmo.OlmoDecoderLayer |
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaRotaryEmbedding, eager_attention_forward, rotate_half
from .configuration_olmo import OlmoConfig
class OlmoDecoderLayer(LlamaDecoderLayer):
    def __init__(self, config: OlmoConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.input_layernorm = OlmoLayerNorm(config.hidden_size)
        self.post_attention_layernorm = OlmoLayerNorm(config.hidden_size)
        self.self_attn = OlmoAttention(config=config, layer_idx=layer_idx)
|
class OlmoDecoderLayer(LlamaDecoderLayer):
    def __init__(self, config: OlmoConfig, layer_idx: int):
        pass
| 2 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 5 | 3 | 0 | 1 | 3 | 1 | 13 | 6 | 0 | 6 | 5 | 4 | 0 | 6 | 5 | 4 | 1 | 2 | 0 | 1 |
4,206 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modular_olmo.py | transformers.models.olmo.modular_olmo.OlmoForCausalLM |
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaRotaryEmbedding, eager_attention_forward, rotate_half
class OlmoForCausalLM(LlamaForCausalLM):
    pass
|
class OlmoForCausalLM(LlamaForCausalLM):
    pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
4,207 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modular_olmo.py | transformers.models.olmo.modular_olmo.OlmoLayerNorm |
import torch.nn.functional as F
import torch.nn as nn
import torch
class OlmoLayerNorm(nn.Module):
    """LayerNorm but with no learnable weight or bias."""
    def __init__(self, hidden_size: int) -> None:
        super().__init__()
        self.normalized_shape = (hidden_size,)
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        orig_dtype = hidden_states.dtype
        return F.layer_norm(hidden_states.to(dtype=torch.float32), self.normalized_shape, None, None, eps=1e-05).to(orig_dtype)
|
class OlmoLayerNorm(nn.Module):
    '''LayerNorm but with no learnable weight or bias.'''
    def __init__(self, hidden_size: int) -> None:
        pass
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
| 3 | 1 | 4 | 0 | 4 | 0 | 1 | 0.11 | 1 | 3 | 0 | 0 | 2 | 1 | 2 | 12 | 12 | 2 | 9 | 5 | 6 | 1 | 7 | 5 | 4 | 1 | 1 | 0 | 2 |
4,208 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modular_olmo.py | transformers.models.olmo.modular_olmo.OlmoMLP |
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaRotaryEmbedding, eager_attention_forward, rotate_half
import torch.nn as nn
class OlmoMLP(LlamaMLP):
    def __init__(self, config):
        super().__init__(config)
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
|
class OlmoMLP(LlamaMLP):
    def __init__(self, config):
        pass
| 2 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 5 | 1 | 13 | 6 | 0 | 6 | 5 | 4 | 0 | 6 | 5 | 4 | 1 | 2 | 0 | 1 |
4,209 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo/modular_olmo.py | transformers.models.olmo.modular_olmo.OlmoModel |
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaRotaryEmbedding, eager_attention_forward, rotate_half
import torch.nn as nn
from .configuration_olmo import OlmoConfig
class OlmoModel(LlamaModel):
    def __init__(self, config: OlmoConfig):
        super().__init__(config)
        self.layers = nn.ModuleList([OlmoDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = OlmoLayerNorm(config.hidden_size)
|
class OlmoModel(LlamaModel):
    def __init__(self, config: OlmoConfig):
        pass
| 2 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 5 | 3 | 0 | 1 | 2 | 1 | 8 | 7 | 0 | 7 | 4 | 5 | 0 | 5 | 4 | 3 | 1 | 3 | 0 | 1 |
4,210 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/configuration_olmo2.py | transformers.models.olmo2.configuration_olmo2.Olmo2Config |
from ...configuration_utils import PretrainedConfig
class Olmo2Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Olmo2Model`]. It is used to instantiate an OLMo2
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the [allenai/Olmo2-7B-1124-hf](https://huggingface.co/allenai/Olmo2-7B-1124-hf).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50304):
Vocabulary size of the Olmo2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Olmo2Model`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 50279):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum. See the following thread for more information on how
these scaling strategies behave:
https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
experimental feature, subject to breaking API changes in future versions.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
```python
>>> from transformers import Olmo2Model, Olmo2Config
>>> # Initializing a Olmo2 7B style configuration
>>> configuration = Olmo2Config()
>>> # Initializing a model from the Olmo2 7B style configuration
>>> model = Olmo2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'olmo2'
keys_to_ignore_at_inference = ['past_key_values']
base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise_rep', 'layers.*.self_attn.k_proj': 'colwise_rep', 'layers.*.self_attn.v_proj': 'colwise_rep', 'layers.*.self_attn.o_proj': 'rowwise_rep', 'layers.*.mlp.gate_proj': 'colwise', 'layers.*.mlp.up_proj': 'colwise', 'layers.*.mlp.down_proj': 'rowwise'}
base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}
def __init__(self, vocab_size=50304, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, use_cache=True, pad_token_id=1, bos_token_id=None, eos_token_id=50279, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, rms_norm_eps=1e-05, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self._rope_scaling_validation()
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.rms_norm_eps = rms_norm_eps
def _rope_scaling_validation(self):
"""
Validate the `rope_scaling` configuration.
"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
raise ValueError(f'`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}')
rope_scaling_type = self.rope_scaling.get('type', None)
rope_scaling_factor = self.rope_scaling.get('factor', None)
if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
|
class Olmo2Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Olmo2Model`]. It is used to instantiate an OLMo2
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the [allenai/Olmo2-7B-1124-hf](https://huggingface.co/allenai/Olmo2-7B-1124-hf).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50304):
Vocabulary size of the Olmo2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Olmo2Model`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 50279):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum. See the following thread for more information on how
these scaling strategies behave:
https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
experimental feature, subject to breaking API changes in future versions.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
```python
>>> from transformers import Olmo2Model, Olmo2Config
>>> # Initializing a Olmo2 7B style configuration
>>> configuration = Olmo2Config()
>>> # Initializing a model from the Olmo2 7B style configuration
>>> model = Olmo2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, vocab_size=50304, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, use_cache=True, pad_token_id=1, bos_token_id=None, eos_token_id=50279, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, rms_norm_eps=1e-05, **kwargs):
pass
def _rope_scaling_validation(self):
'''
Validate the `rope_scaling` configuration.
'''
pass
| 3 | 2 | 36 | 2 | 32 | 2 | 4 | 1.03 | 1 | 4 | 0 | 0 | 2 | 15 | 2 | 2 | 162 | 14 | 75 | 45 | 50 | 77 | 35 | 23 | 32 | 5 | 1 | 1 | 7 |
4,211 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modeling_olmo2.py | transformers.models.olmo2.modeling_olmo2.Olmo2Attention |
import torch.nn as nn
from .configuration_olmo2 import Olmo2Config
from typing import Callable, Optional, Union
from ...utils.deprecation import deprecate_kwarg
from transformers.utils.generic import TransformersKwargs
from ...processing_utils import Unpack
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...cache_utils import Cache, DynamicCache
import torch
class Olmo2Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Olmo2Config, layer_idx: Optional[int]=None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim, config.rms_norm_eps)
self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim, config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(self.q_proj(hidden_states))
key_states = self.k_norm(self.k_proj(hidden_states))
value_states = self.v_proj(hidden_states)
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
|
class Olmo2Attention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: Olmo2Config, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
pass
| 4 | 1 | 38 | 4 | 34 | 1 | 3 | 0.03 | 1 | 6 | 3 | 0 | 2 | 13 | 2 | 12 | 80 | 10 | 68 | 33 | 57 | 2 | 39 | 25 | 36 | 5 | 1 | 2 | 6 |
4,212 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modeling_olmo2.py | transformers.models.olmo2.modeling_olmo2.Olmo2DecoderLayer |
import torch
from transformers.utils.generic import TransformersKwargs
import torch.nn as nn
from ...utils.deprecation import deprecate_kwarg
from .configuration_olmo2 import Olmo2Config
from ...cache_utils import Cache, DynamicCache
from typing import Callable, Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
from ...processing_utils import Unpack
class Olmo2DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Olmo2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Olmo2Attention(config=config, layer_idx=layer_idx)
        self.mlp = Olmo2MLP(config)
        self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
        residual = hidden_states
        hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = residual + hidden_states
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_feedforward_layernorm(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states
|
class Olmo2DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Olmo2Config, layer_idx: int):
        pass
    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
        pass
| 4 | 0 | 24 | 3 | 21 | 2 | 2 | 0.07 | 1 | 9 | 5 | 0 | 2 | 5 | 2 | 12 | 50 | 6 | 42 | 22 | 28 | 3 | 21 | 11 | 18 | 2 | 1 | 1 | 3 |
4,213 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modeling_olmo2.py | transformers.models.olmo2.modeling_olmo2.Olmo2ForCausalLM |
from ...generation import GenerationMixin
import torch.nn as nn
from ...utils import auto_docstring, can_return_tuple
import torch
from ...processing_utils import Unpack
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from typing import Callable, Optional, Union
from transformers.utils.generic import TransformersKwargs
from ...cache_utils import Cache, DynamicCache
@auto_docstring
class Olmo2ForCausalLM(Olmo2PreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
_tp_plan = {'lm_head': 'colwise_rep'}
_pp_plan = {'lm_head': (['hidden_states'], ['logits'])}
def __init__(self, config):
super().__init__(config)
self.model = Olmo2Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
"""
Example:
```python
>>> from transformers import AutoTokenizer, Olmo2ForCausalLM
>>> model = Olmo2ForCausalLM.from_pretrained("meta-olmo2/Olmo2-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-olmo2/Olmo2-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class Olmo2ForCausalLM(Olmo2PreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
'''
Example:
```python
>>> from transformers import AutoTokenizer, Olmo2ForCausalLM
>>> model = Olmo2ForCausalLM.from_pretrained("meta-olmo2/Olmo2-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-olmo2/Olmo2-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```'''
pass
| 6 | 1 | 14 | 2 | 9 | 4 | 2 | 0.38 | 2 | 9 | 4 | 0 | 8 | 3 | 8 | 9 | 123 | 21 | 74 | 36 | 47 | 28 | 36 | 20 | 27 | 8 | 2 | 1 | 15 |
4,214 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modeling_olmo2.py | transformers.models.olmo2.modeling_olmo2.Olmo2MLP |
from ...activations import ACT2FN
import torch.nn as nn
class Olmo2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]
    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj
|
class Olmo2MLP(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, x):
        pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 7 | 2 | 12 | 14 | 1 | 13 | 11 | 10 | 0 | 13 | 11 | 10 | 1 | 1 | 0 | 2 |
4,215 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modeling_olmo2.py | transformers.models.olmo2.modeling_olmo2.Olmo2Model |
from transformers.utils.generic import TransformersKwargs
from ...cache_utils import Cache, DynamicCache
import torch
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from typing import Callable, Optional, Union
import torch.nn as nn
from .configuration_olmo2 import Olmo2Config
from ...processing_utils import Unpack
from ...utils.generic import check_model_inputs
from ...utils import auto_docstring, can_return_tuple
from ...masking_utils import create_causal_mask
@auto_docstring
class Olmo2Model(Olmo2PreTrainedModel):
def __init__(self, config: Olmo2Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([Olmo2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Olmo2RotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
|
@auto_docstring
class Olmo2Model(Olmo2PreTrainedModel):
def __init__(self, config: Olmo2Config):
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
pass
| 6 | 0 | 40 | 5 | 30 | 6 | 6 | 0.22 | 1 | 16 | 10 | 0 | 5 | 7 | 6 | 7 | 257 | 34 | 184 | 65 | 146 | 40 | 89 | 34 | 82 | 21 | 2 | 2 | 37 |
4,216 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modeling_olmo2.py | transformers.models.olmo2.modeling_olmo2.Olmo2PreTrainedModel |
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_olmo2 import Olmo2Config
from ...utils import auto_docstring, can_return_tuple
@auto_docstring
class Olmo2PreTrainedModel(PreTrainedModel):
    config: Olmo2Config
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['Olmo2DecoderLayer']
    _skip_keys_device_placement = ['past_key_values']
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {'hidden_states': Olmo2DecoderLayer, 'attentions': Olmo2Attention}
|
@auto_docstring
class Olmo2PreTrainedModel(PreTrainedModel):
    pass
| 2 | 0 | 10 | 0 | 10 | 0 | 5 | 0 | 1 | 0 | 0 | 2 | 1 | 0 | 1 | 1 | 24 | 1 | 23 | 15 | 21 | 0 | 22 | 15 | 20 | 5 | 1 | 2 | 5 |
4,217 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modeling_olmo2.py | transformers.models.olmo2.modeling_olmo2.Olmo2RMSNorm |
import torch
from ...integrations import use_kernel_forward_from_hub
import torch.nn as nn
@use_kernel_forward_from_hub('RMSNorm')
class Olmo2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-06):
        """
        Olmo2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return (self.weight * hidden_states).to(input_dtype)
    def extra_repr(self):
        return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
|
@use_kernel_forward_from_hub('RMSNorm')
class Olmo2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-06):
        '''
        Olmo2RMSNorm is equivalent to T5LayerNorm
        '''
        pass
    def forward(self, hidden_states):
        pass
    def extra_repr(self):
        pass
| 5 | 1 | 5 | 0 | 4 | 1 | 1 | 0.23 | 1 | 2 | 0 | 0 | 3 | 2 | 3 | 13 | 18 | 2 | 13 | 8 | 9 | 3 | 13 | 8 | 9 | 1 | 1 | 0 | 3 |
4,218 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modeling_olmo2.py | transformers.models.olmo2.modeling_olmo2.Olmo2RotaryEmbedding |
import torch
import torch.nn as nn
from .configuration_olmo2 import Olmo2Config
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
class Olmo2RotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor
    def __init__(self, config: Olmo2Config, device=None):
        super().__init__()
        if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
        else:
            self.rope_type = 'default'
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer('inv_freq', inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq
    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling
        return (cos, sin)
|
class Olmo2RotaryEmbedding(nn.Module):
    def __init__(self, config: Olmo2Config, device=None):
        pass
    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        pass
| 5 | 0 | 18 | 2 | 13 | 5 | 3 | 0.35 | 1 | 4 | 1 | 0 | 3 | 7 | 3 | 13 | 59 | 8 | 40 | 21 | 35 | 14 | 38 | 20 | 34 | 3 | 1 | 1 | 8 |
4,219 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modular_olmo2.py | transformers.models.olmo2.modular_olmo2.Olmo2Attention |
from ...cache_utils import Cache
from ..olmo.modeling_olmo import OlmoAttention, OlmoDecoderLayer, OlmoForCausalLM, OlmoModel, OlmoRotaryEmbedding, apply_rotary_pos_emb
import torch
from ...utils.deprecation import deprecate_kwarg
from ...processing_utils import Unpack
from ..llama.modeling_llama import LlamaPreTrainedModel, LlamaRMSNorm, eager_attention_forward
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from typing import Callable, Optional
import torch.nn as nn
from transformers.utils.generic import TransformersKwargs
class Olmo2Attention(OlmoAttention):
def __init__(self, config: Olmo2Config, layer_idx: Optional[int]=None):
super().__init__(config, layer_idx=layer_idx)
self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim, config.rms_norm_eps)
self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim, config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(self.q_proj(hidden_states))
key_states = self.k_norm(self.k_proj(hidden_states))
value_states = self.v_proj(hidden_states)
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
|
class Olmo2Attention(OlmoAttention):
def __init__(self, config: Olmo2Config, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
pass
| 4 | 0 | 28 | 4 | 24 | 1 | 3 | 0.02 | 1 | 6 | 3 | 0 | 2 | 3 | 2 | 14 | 58 | 8 | 49 | 23 | 38 | 1 | 28 | 14 | 25 | 5 | 2 | 2 | 6 |
4,220 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modular_olmo2.py | transformers.models.olmo2.modular_olmo2.Olmo2Config |
from ..olmo.configuration_olmo import OlmoConfig
class Olmo2Config(OlmoConfig):
"""
This is the configuration class to store the configuration of a [`Olmo2Model`]. It is used to instantiate an OLMo2
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the [allenai/Olmo2-7B-1124-hf](https://huggingface.co/allenai/Olmo2-7B-1124-hf).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50304):
Vocabulary size of the Olmo2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Olmo2Model`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 50279):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum. See the following thread for more information on how
these scaling strategies behave:
https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
experimental feature, subject to breaking API changes in future versions.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
```python
>>> from transformers import Olmo2Model, Olmo2Config
>>> # Initializing a Olmo2 7B style configuration
>>> configuration = Olmo2Config()
>>> # Initializing a model from the Olmo2 7B style configuration
>>> model = Olmo2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'olmo2'
base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise_rep', 'layers.*.self_attn.k_proj': 'colwise_rep', 'layers.*.self_attn.v_proj': 'colwise_rep', 'layers.*.self_attn.o_proj': 'rowwise_rep', 'layers.*.mlp.gate_proj': 'colwise', 'layers.*.mlp.up_proj': 'colwise', 'layers.*.mlp.down_proj': 'rowwise'}
base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}
def __init__(self, vocab_size=50304, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, use_cache=True, pad_token_id=1, bos_token_id=None, eos_token_id=50279, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, rms_norm_eps=1e-05, **kwargs):
super().__init__(vocab_size=vocab_size, hidden_size=hidden_size, intermediate_size=intermediate_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, num_key_value_heads=num_key_value_heads, hidden_act=hidden_act, max_position_embeddings=max_position_embeddings, initializer_range=initializer_range, use_cache=use_cache, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, rope_theta=rope_theta, rope_scaling=rope_scaling, attention_bias=attention_bias, attention_dropout=attention_dropout, **kwargs)
self.rms_norm_eps = rms_norm_eps
del self.clip_qkv
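A minimal usage sketch (not part of the source record) of the `rope_scaling` option documented above; the linear strategy and the factor of 2.0 are illustrative values, not recommendations:

```python
# Hedged sketch: enabling linear RoPE scaling on an OLMo2-style config.
# Per the docstring above, max_position_embeddings keeps its original value.
from transformers import Olmo2Config

config = Olmo2Config(
    max_position_embeddings=2048,
    rope_scaling={"type": "linear", "factor": 2.0},  # factor must be a float > 1
)
print(config.rope_scaling)
```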
|
class Olmo2Config(OlmoConfig):
'''
This is the configuration class to store the configuration of an [`Olmo2Model`]. It is used to instantiate an OLMo2
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the [allenai/Olmo2-7B-1124-hf](https://huggingface.co/allenai/Olmo2-7B-1124-hf).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50304):
Vocabulary size of the Olmo2 model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`Olmo2Model`].
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If not specified, it defaults to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 50279):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie the input and output word embeddings.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum. See the following thread for more information on how
these scaling strategies behave:
https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
experimental feature, subject to breaking API changes in future versions.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
```python
>>> from transformers import Olmo2Model, Olmo2Config
>>> # Initializing an Olmo2 7B style configuration
>>> configuration = Olmo2Config()
>>> # Initializing a model from the Olmo2 7B style configuration
>>> model = Olmo2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, vocab_size=50304, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, use_cache=True, pad_token_id=1, bos_token_id=None, eos_token_id=50279, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, rms_norm_eps=1e-05, **kwargs):
pass
| total_program_units: 2
| total_doc_str: 1
| AvgCountLine: 47
| AvgCountLineBlank: 1
| AvgCountLineCode: 46
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 1.28
| CountClassBase: 1
| CountClassCoupled: 1
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 1
| CountDeclMethod: 1
| CountDeclMethodAll: 3
| CountLine: 136
| CountLineBlank: 10
| CountLineCode: 57
| CountLineCodeDecl: 27
| CountLineCodeExe: 33
| CountLineComment: 73
| CountStmt: 7
| CountStmtDecl: 5
| CountStmtExe: 5
| MaxCyclomatic: 1
| MaxInheritanceTree: 2
| MaxNesting: 0
| SumCyclomatic: 1
|
4,221
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modular_olmo2.py
|
transformers.models.olmo2.modular_olmo2.Olmo2DecoderLayer
|
import torch.nn as nn
from typing import Callable, Optional
from ..olmo.modeling_olmo import OlmoAttention, OlmoDecoderLayer, OlmoForCausalLM, OlmoModel, OlmoRotaryEmbedding, apply_rotary_pos_emb
from ...cache_utils import Cache
from transformers.utils.generic import TransformersKwargs
import torch
from ...processing_utils import Unpack
from ...utils.deprecation import deprecate_kwarg
class Olmo2DecoderLayer(OlmoDecoderLayer):
def __init__(self, config: Olmo2Config, layer_idx: int):
super().__init__(config, layer_idx=layer_idx)
self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.self_attn = Olmo2Attention(config=config, layer_idx=layer_idx)
del self.input_layernorm
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
residual = hidden_states
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_feedforward_layernorm(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
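The forward above applies each normalization to the sublayer output before the residual add (OLMo2's reordering of the usual pre-norm layout). A standalone sketch of that pattern, with a generic sublayer standing in for attention or the MLP; all names and sizes here are illustrative:

```python
import torch
from torch import nn

class PostSublayerNormBlock(nn.Module):
    """Illustrative only: x + norm(sublayer(x)), mirroring the ordering above."""
    def __init__(self, dim: int):
        super().__init__()
        self.sublayer = nn.Linear(dim, dim)  # stand-in for self-attention or the MLP
        self.norm = nn.LayerNorm(dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + self.norm(self.sublayer(x))  # norm sits before the residual add

x = torch.randn(2, 5, 16)
print(PostSublayerNormBlock(16)(x).shape)  # torch.Size([2, 5, 16])
```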
|
class Olmo2DecoderLayer(OlmoDecoderLayer):
def __init__(self, config: Olmo2Config, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
| total_program_units: 4
| total_doc_str: 0
| AvgCountLine: 23
| AvgCountLineBlank: 2
| AvgCountLineCode: 20
| AvgCountLineComment: 2
| AvgCyclomatic: 2
| CommentToCodeRatio: 0.07
| CountClassBase: 1
| CountClassCoupled: 8
| CountClassCoupledModified: 4
| CountClassDerived: 0
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 3
| CountDeclMethod: 2
| CountDeclMethodAll: 14
| CountLine: 48
| CountLineBlank: 5
| CountLineCode: 41
| CountLineCodeDecl: 20
| CountLineCodeExe: 27
| CountLineComment: 3
| CountStmt: 20
| CountStmtDecl: 9
| CountStmtExe: 17
| MaxCyclomatic: 2
| MaxInheritanceTree: 2
| MaxNesting: 1
| SumCyclomatic: 3
|
4,222
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modular_olmo2.py
|
transformers.models.olmo2.modular_olmo2.Olmo2ForCausalLM
|
from ..olmo.modeling_olmo import OlmoAttention, OlmoDecoderLayer, OlmoForCausalLM, OlmoModel, OlmoRotaryEmbedding, apply_rotary_pos_emb
class Olmo2ForCausalLM(OlmoForCausalLM):
pass
|
class Olmo2ForCausalLM(OlmoForCausalLM):
pass
| total_program_units: 1
| total_doc_str: 0
| AvgCountLine: 0
| AvgCountLineBlank: 0
| AvgCountLineCode: 0
| AvgCountLineComment: 0
| AvgCyclomatic: 0
| CommentToCodeRatio: 0
| CountClassBase: 1
| CountClassCoupled: 0
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 0
| CountDeclInstanceVariable: 0
| CountDeclMethod: 0
| CountDeclMethodAll: 9
| CountLine: 2
| CountLineBlank: 0
| CountLineCode: 2
| CountLineCodeDecl: 1
| CountLineCodeExe: 1
| CountLineComment: 0
| CountStmt: 2
| CountStmtDecl: 1
| CountStmtExe: 1
| MaxCyclomatic: 0
| MaxInheritanceTree: 3
| MaxNesting: 0
| SumCyclomatic: 0
|
4,223
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modular_olmo2.py
|
transformers.models.olmo2.modular_olmo2.Olmo2Model
|
from ..olmo.modeling_olmo import OlmoAttention, OlmoDecoderLayer, OlmoForCausalLM, OlmoModel, OlmoRotaryEmbedding, apply_rotary_pos_emb
import torch.nn as nn
class Olmo2Model(OlmoModel):
def __init__(self, config: Olmo2Config):
super().__init__(config)
self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.layers = nn.ModuleList([Olmo2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
|
class Olmo2Model(OlmoModel):
def __init__(self, config: Olmo2Config):
pass
| total_program_units: 2
| total_doc_str: 0
| AvgCountLine: 6
| AvgCountLineBlank: 0
| AvgCountLineCode: 6
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 0
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 3
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 2
| CountDeclMethod: 1
| CountDeclMethodAll: 8
| CountLine: 7
| CountLineBlank: 0
| CountLineCode: 7
| CountLineCodeDecl: 4
| CountLineCodeExe: 5
| CountLineComment: 0
| CountStmt: 5
| CountStmtDecl: 4
| CountStmtExe: 3
| MaxCyclomatic: 1
| MaxInheritanceTree: 3
| MaxNesting: 0
| SumCyclomatic: 1
|
4,224
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmo2/modular_olmo2.py
|
transformers.models.olmo2.modular_olmo2.Olmo2RMSNorm
|
import torch
from ..llama.modeling_llama import LlamaPreTrainedModel, LlamaRMSNorm, eager_attention_forward
import torch.nn as nn
class Olmo2RMSNorm(LlamaRMSNorm):
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return (self.weight * hidden_states).to(input_dtype)
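In math form, the forward above computes, for a hidden vector x of dimension d with learned scale `weight` and epsilon `variance_epsilon`:

```latex
\mathrm{RMSNorm}(x) = w \odot \frac{x}{\sqrt{\frac{1}{d}\sum_{i=1}^{d} x_i^2 + \epsilon}}
```

with the division carried out in float32 before casting back to the input dtype.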
|
class Olmo2RMSNorm(LlamaRMSNorm):
def forward(self, hidden_states):
pass
| total_program_units: 2
| total_doc_str: 0
| AvgCountLine: 0
| AvgCountLineBlank: 0
| AvgCountLineCode: 0
| AvgCountLineComment: 0
| AvgCyclomatic: 0
| CommentToCodeRatio: 0
| CountClassBase: 1
| CountClassCoupled: 0
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 0
| CountDeclInstanceVariable: 0
| CountDeclMethod: 0
| CountDeclMethodAll: 13
| CountLine: 2
| CountLineBlank: 0
| CountLineCode: 2
| CountLineCodeDecl: 1
| CountLineCodeExe: 1
| CountLineComment: 0
| CountStmt: 2
| CountStmtDecl: 1
| CountStmtExe: 1
| MaxCyclomatic: 0
| MaxInheritanceTree: 2
| MaxNesting: 0
| SumCyclomatic: 0
|
4,225
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmoe/configuration_olmoe.py
|
transformers.models.olmoe.configuration_olmoe.OlmoeConfig
|
from ...modeling_rope_utils import rope_config_validation
from ...configuration_utils import PretrainedConfig
class OlmoeConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of an [`OlmoeModel`]. It is used to instantiate an OLMoE
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the [allenai/OLMoE-1B-7B-0924](https://huggingface.co/allenai/OLMoE-1B-7B-0924).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50304):
Vocabulary size of the OLMoE model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`OlmoeModel`].
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 16):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If not specified, it defaults to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 50279):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie the input and output word embeddings.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum. See the following thread for more information on how
these scaling strategies behave:
https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
experimental feature, subject to breaking API changes in future versions.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
clip_qkv (`float`, *optional*):
If not `None`, elements of query, key and value attention states are clipped so that their
absolute value does not exceed this value.
num_experts_per_tok (`int`, *optional*, defaults to 8):
Number of selected experts.
num_experts (`int`, *optional*, defaults to 64):
Number of routed experts.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
router_aux_loss_coef (`float`, *optional*, defaults to 0.01):
The aux loss factor for the total loss.
norm_topk_prob (`bool`, *optional*, defaults to `False`):
Whether to normalize the topk probabilities.
```python
>>> from transformers import OlmoeModel, OlmoeConfig
>>> # Initializing an OLMoE 7B A1B style configuration
>>> configuration = OlmoeConfig()
>>> # Initializing a model from the OLMoE 7B A1B style configuration
>>> model = OlmoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'olmoe'
keys_to_ignore_at_inference = ['past_key_values']
def __init__(self, vocab_size=50304, hidden_size=2048, intermediate_size=2048, num_hidden_layers=16, num_attention_heads=16, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=4096, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=1, bos_token_id=None, eos_token_id=50279, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, clip_qkv=None, num_experts_per_tok=8, num_experts=64, output_router_logits=False, router_aux_loss_coef=0.01, norm_topk_prob=False, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.clip_qkv = clip_qkv
self.num_experts_per_tok = num_experts_per_tok
self.num_experts = num_experts
self.output_router_logits = output_router_logits
self.router_aux_loss_coef = router_aux_loss_coef
self.norm_topk_prob = norm_topk_prob
if self.rope_scaling is not None and 'type' in self.rope_scaling:
self.rope_scaling['rope_type'] = self.rope_scaling['type']
rope_config_validation(self)
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
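A hedged sketch of constructing a deliberately tiny OLMoE-style config from the arguments documented above (useful for smoke tests; every size here is illustrative, not a released checkpoint's value):

```python
from transformers import OlmoeConfig

tiny = OlmoeConfig(
    hidden_size=64,
    intermediate_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_experts=8,              # routed experts per MoE layer
    num_experts_per_tok=2,      # top-k experts selected per token
    output_router_logits=True,  # expose router logits for the aux loss
)
print(tiny.num_experts, tiny.num_experts_per_tok)  # 8 2
```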
|
class OlmoeConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of an [`OlmoeModel`]. It is used to instantiate an OLMoE
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the [allenai/OLMoE-1B-7B-0924](https://huggingface.co/allenai/OLMoE-1B-7B-0924).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50304):
Vocabulary size of the OLMoE model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`OlmoeModel`].
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 16):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If not specified, it defaults to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 50279):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie the input and output word embeddings.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum. See the following thread for more information on how
these scaling strategies behave:
https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
experimental feature, subject to breaking API changes in future versions.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
clip_qkv (`float`, *optional*):
If not `None`, elements of query, key and value attention states are clipped so that their
absolute value does not exceed this value.
num_experts_per_tok (`int`, *optional*, defaults to 8):
Number of selected experts.
num_experts (`int`, *optional*, defaults to 64):
Number of routed experts.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
router_aux_loss_coef (`float`, *optional*, defaults to 0.01):
The aux loss factor for the total loss.
norm_topk_prob (`bool`, *optional*, defaults to `False`):
Whether to normalize the topk probabilities.
```python
>>> from transformers import OlmoeModel, OlmoeConfig
>>> # Initializing an OLMoE 7B A1B style configuration
>>> configuration = OlmoeConfig()
>>> # Initializing a model from the OLMoE 7B A1B style configuration
>>> model = OlmoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=50304, hidden_size=2048, intermediate_size=2048, num_hidden_layers=16, num_attention_heads=16, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=4096, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=1, bos_token_id=None, eos_token_id=50279, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, clip_qkv=None, num_experts_per_tok=8, num_experts=64, output_router_logits=False, router_aux_loss_coef=0.01, norm_topk_prob=False, **kwargs):
pass
| total_program_units: 2
| total_doc_str: 1
| AvgCountLine: 68
| AvgCountLineBlank: 3
| AvgCountLineCode: 62
| AvgCountLineComment: 3
| AvgCyclomatic: 3
| CommentToCodeRatio: 1.31
| CountClassBase: 1
| CountClassCoupled: 1
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 21
| CountDeclMethod: 1
| CountDeclMethodAll: 1
| CountLine: 162
| CountLineBlank: 12
| CountLineCode: 65
| CountLineCodeDecl: 53
| CountLineCodeExe: 35
| CountLineComment: 85
| CountStmt: 31
| CountStmtDecl: 25
| CountStmtExe: 29
| MaxCyclomatic: 3
| MaxInheritanceTree: 1
| MaxNesting: 1
| SumCyclomatic: 3
|
4,226
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmoe/modeling_olmoe.py
|
transformers.models.olmoe.modeling_olmoe.OlmoeAttention
|
import math
from ...utils.deprecation import deprecate_kwarg
from typing import Optional, Union
from .configuration_olmoe import OlmoeConfig
import torch.nn.functional as F
import torch
from torch import nn
from ...cache_utils import Cache, DynamicCache, StaticCache
class OlmoeAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: OlmoeConfig, layer_idx: Optional[int]=None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
self.attention_dropout = config.attention_dropout
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.rope_theta = config.rope_theta
self.is_causal = True
if self.head_dim * self.num_heads != self.hidden_size:
raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).')
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)
self.q_norm = OlmoeRMSNorm(self.hidden_size, eps=config.rms_norm_eps)
self.k_norm = OlmoeRMSNorm(self.hidden_size // self.num_heads * self.num_key_value_heads, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_norm(self.q_proj(hidden_states))
key_states = self.k_norm(self.k_proj(hidden_states))
value_states = self.v_proj(hidden_states)
if self.config.clip_qkv is not None:
query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, :key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}')
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return (attn_output, attn_weights)
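`repeat_kv` above expands the key/value heads to match the query heads for grouped-query attention. A minimal sketch that is equivalent in effect (not the library's exact implementation):

```python
import torch

def repeat_kv_sketch(x: torch.Tensor, n_rep: int) -> torch.Tensor:
    """(batch, num_kv_heads, seq, head_dim) -> (batch, num_kv_heads * n_rep, seq, head_dim)."""
    b, kv, s, d = x.shape
    if n_rep == 1:
        return x
    return x[:, :, None, :, :].expand(b, kv, n_rep, s, d).reshape(b, kv * n_rep, s, d)

k = torch.randn(1, 2, 4, 8)          # 2 key/value heads
print(repeat_kv_sketch(k, 4).shape)  # torch.Size([1, 8, 4, 8]), matching 8 query heads
```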
|
class OlmoeAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: OlmoeConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| total_program_units: 4
| total_doc_str: 1
| AvgCountLine: 50
| AvgCountLineBlank: 9
| AvgCountLineCode: 40
| AvgCountLineComment: 2
| AvgCyclomatic: 5
| CommentToCodeRatio: 0.05
| CountClassBase: 1
| CountClassCoupled: 8
| CountClassCoupledModified: 3
| CountClassDerived: 2
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 17
| CountDeclMethod: 2
| CountDeclMethodAll: 12
| CountLine: 104
| CountLineBlank: 20
| CountLineCode: 81
| CountLineCodeDecl: 40
| CountLineCodeExe: 67
| CountLineComment: 4
| CountStmt: 58
| CountStmtDecl: 29
| CountStmtExe: 55
| MaxCyclomatic: 6
| MaxInheritanceTree: 1
| MaxNesting: 1
| SumCyclomatic: 9
|
4,227
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmoe/modeling_olmoe.py
|
transformers.models.olmoe.modeling_olmoe.OlmoeDecoderLayer
|
from ...utils.deprecation import deprecate_kwarg
from ...modeling_layers import GradientCheckpointingLayer
from typing import Optional, Union
import torch
from ...cache_utils import Cache, DynamicCache, StaticCache
from .configuration_olmoe import OlmoeConfig
import torch.nn.functional as F
class OlmoeDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: OlmoeConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = OLMOE_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
self.mlp = OlmoeSparseMoeBlock(config)
self.input_layernorm = OlmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = OlmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, output_router_logits: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_router_logits (`bool`, *optional*):
Whether or not to return the logits of all the routers. They are useful for computing the router loss,
and should not be returned during inference.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Cache`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
into the model.
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states, router_logits = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if output_router_logits:
outputs += (router_logits,)
return outputs
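`self.mlp` above is a sparse MoE block that returns both hidden states and router logits. A hedged sketch of the top-k routing idea behind it (not the library's `OlmoeSparseMoeBlock`; renormalizing the selected weights corresponds to the `norm_topk_prob` option in the config):

```python
import torch
import torch.nn.functional as F

def route_topk(router_logits: torch.Tensor, top_k: int):
    """Illustrative: pick top_k experts per token and renormalize their weights."""
    probs = F.softmax(router_logits, dim=-1)             # (tokens, num_experts)
    weights, experts = torch.topk(probs, top_k, dim=-1)  # (tokens, top_k)
    weights = weights / weights.sum(dim=-1, keepdim=True)
    return weights, experts

w, idx = route_topk(torch.randn(6, 8), top_k=2)  # 6 tokens, 8 experts
print(w.shape, idx.shape)  # torch.Size([6, 2]) torch.Size([6, 2])
```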
|
class OlmoeDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: OlmoeConfig, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, output_router_logits: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_router_logits (`bool`, *optional*):
Whether or not to return the logits of all the routers. They are useful for computing the router loss,
and should not be returned during inference.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Cache`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
into the model.
'''
pass
| total_program_units: 4
| total_doc_str: 1
| AvgCountLine: 42
| AvgCountLineBlank: 5
| AvgCountLineCode: 23
| AvgCountLineComment: 14
| AvgCyclomatic: 3
| CommentToCodeRatio: 0.57
| CountClassBase: 1
| CountClassCoupled: 8
| CountClassCoupledModified: 4
| CountClassDerived: 0
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 5
| CountDeclMethod: 2
| CountDeclMethodAll: 12
| CountLine: 85
| CountLineBlank: 11
| CountLineCode: 47
| CountLineCodeDecl: 24
| CountLineCodeExe: 32
| CountLineComment: 27
| CountStmt: 25
| CountStmtDecl: 12
| CountStmtExe: 22
| MaxCyclomatic: 4
| MaxInheritanceTree: 1
| MaxNesting: 1
| SumCyclomatic: 5
|
4,228
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmoe/modeling_olmoe.py
|
transformers.models.olmoe.modeling_olmoe.OlmoeFlashAttention2
|
from ...cache_utils import Cache, DynamicCache, StaticCache
import torch.nn.functional as F
from ...utils.deprecation import deprecate_kwarg
from typing import Optional, Union
from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available
import torch
class OlmoeFlashAttention2(OlmoeAttention):
"""
OLMoE flash attention module. This module inherits from `OlmoeAttention` as the weights of the module stay
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
output_attentions = False
bsz, q_len, _ = hidden_states.size()
query_states = self.q_norm(self.q_proj(hidden_states))
key_states = self.k_norm(self.k_proj(hidden_states))
value_states = self.v_proj(hidden_states)
if self.config.clip_qkv is not None:
query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
dropout_rate = self.attention_dropout if self.training else 0.0
input_dtype = query_states.dtype
device_type = query_states.device.type if query_states.device.type != 'mps' else 'cpu'
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_dtype(device_type) if hasattr(torch, 'get_autocast_dtype') else torch.get_autocast_gpu_dtype()
elif hasattr(self.config, '_pre_quantization_dtype'):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_proj.weight.dtype
logger.warning_once(f'The input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in {target_dtype}.')
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
attn_output = _flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal)
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return (attn_output, attn_weights)
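Flash-attention kernels do not run in float32, which is why the forward above downcasts when needed. A hedged standalone illustration of the same detect-and-cast step (the float16 fallback is an assumption; the real code falls back to the projection weights' dtype):

```python
import torch

def pick_flash_dtype(t: torch.Tensor, fallback: torch.dtype = torch.float16) -> torch.dtype:
    """Illustrative only: choose a half-precision target for a float32 input."""
    if t.dtype != torch.float32:
        return t.dtype                       # already half precision, nothing to do
    if torch.is_autocast_enabled():          # respect an active autocast context
        return torch.get_autocast_gpu_dtype()
    return fallback

x = torch.randn(2, 4, 8)    # float32 input
print(pick_flash_dtype(x))  # torch.float16 outside autocast
```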
|
class OlmoeFlashAttention2(OlmoeAttention):
'''
OLMoE flash attention module. This module inherits from `OlmoeAttention` as the weights of the module stay
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
flash attention and deal with padding tokens in case the input contains any of them.
'''
def __init__(self, *args, **kwargs):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| total_program_units: 4
| total_doc_str: 1
| AvgCountLine: 49
| AvgCountLineBlank: 8
| AvgCountLineCode: 34
| AvgCountLineComment: 8
| AvgCyclomatic: 5
| CommentToCodeRatio: 0.29
| CountClassBase: 1
| CountClassCoupled: 4
| CountClassCoupledModified: 1
| CountClassDerived: 0
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 1
| CountDeclMethod: 2
| CountDeclMethodAll: 14
| CountLine: 106
| CountLineBlank: 18
| CountLineCode: 68
| CountLineCodeDecl: 26
| CountLineCodeExe: 54
| CountLineComment: 20
| CountStmt: 42
| CountStmtDecl: 15
| CountStmtExe: 39
| MaxCyclomatic: 8
| MaxInheritanceTree: 2
| MaxNesting: 2
| SumCyclomatic: 9
|
4,229
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmoe/modeling_olmoe.py
|
transformers.models.olmoe.modeling_olmoe.OlmoeForCausalLM
|
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...generation import GenerationMixin
from typing import Optional, Union
from ...cache_utils import Cache, DynamicCache, StaticCache
from torch import nn
from ...utils import auto_docstring, logging
import torch
import torch.nn.functional as F
class OlmoeForCausalLM(OlmoePreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config):
super().__init__(config)
self.model = OlmoeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.num_experts
self.num_experts_per_tok = config.num_experts_per_tok
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple, MoeCausalLMOutputWithPast]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, OlmoeForCausalLM
>>> model = OlmoeForCausalLM.from_pretrained("allenai/OLMoE-1B-7B-0924")
>>> tokenizer = AutoTokenizer.from_pretrained("allenai/OLMoE-1B-7B-0924")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
'Hey, are you conscious? Can you talk to me?\\nI’m not sure if you’re conscious of this, but I’m'
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_router_logits = output_router_logits if output_router_logits is not None else self.config.output_router_logits
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position)
hidden_states = outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(outputs.router_logits if return_dict else outputs[-1], self.num_experts, self.num_experts_per_tok, attention_mask)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device)
if not return_dict:
output = (logits,) + outputs[1:]
if output_router_logits:
output = (aux_loss,) + output
return (loss,) + output if loss is not None else output
return MoeCausalLMOutputWithPast(loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits)
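A hedged sketch of what a load-balancing auxiliary loss like `load_balancing_loss_func` above computes: the Switch-Transformer-style product of routed-token fractions and mean router probabilities, summed over experts (simplified; no attention-mask handling):

```python
import torch
import torch.nn.functional as F

def load_balancing_loss_sketch(router_logits: torch.Tensor, num_experts: int, top_k: int):
    """aux = num_experts * sum_e (fraction of routings to e) * (mean router prob of e)."""
    probs = F.softmax(router_logits, dim=-1)           # (tokens, num_experts)
    _, selected = torch.topk(probs, top_k, dim=-1)     # (tokens, top_k)
    mask = F.one_hot(selected, num_experts).float()    # (tokens, top_k, num_experts)
    tokens_per_expert = mask.mean(dim=(0, 1))          # fraction of routings per expert
    prob_per_expert = probs.mean(dim=0)                # mean router prob per expert
    return num_experts * torch.sum(tokens_per_expert * prob_per_expert)

print(load_balancing_loss_sketch(torch.randn(16, 8), num_experts=8, top_k=2))
```

As in the forward above, such a term is scaled by `router_aux_loss_coef` and added to the language-modeling loss.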
|
class OlmoeForCausalLM(OlmoePreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> Union[tuple, MoeCausalLMOutputWithPast]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, OlmoeForCausalLM
>>> model = OlmoeForCausalLM.from_pretrained("allenai/OLMoE-1B-7B-0924")
>>> tokenizer = AutoTokenizer.from_pretrained("allenai/OLMoE-1B-7B-0924")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
'Hey, are you conscious? Can you talk to me?\nI’m not sure if you’re conscious of this, but I’m'
```
'''
pass
| total_program_units: 4
| total_doc_str: 1
| AvgCountLine: 17
| AvgCountLineBlank: 2
| AvgCountLineCode: 11
| AvgCountLineComment: 4
| AvgCyclomatic: 3
| CommentToCodeRatio: 0.32
| CountClassBase: 2
| CountClassCoupled: 7
| CountClassCoupledModified: 2
| CountClassDerived: 0
| CountDeclInstanceMethod: 8
| CountDeclInstanceVariable: 6
| CountDeclMethod: 8
| CountDeclMethodAll: 9
| CountLine: 145
| CountLineBlank: 22
| CountLineCode: 94
| CountLineCodeDecl: 40
| CountLineCodeExe: 66
| CountLineComment: 30
| CountStmt: 46
| CountStmtDecl: 23
| CountStmtExe: 37
| MaxCyclomatic: 13
| MaxInheritanceTree: 2
| MaxNesting: 2
| SumCyclomatic: 20
|
4,230
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmoe/modeling_olmoe.py
|
transformers.models.olmoe.modeling_olmoe.OlmoeMLP
|
from torch import nn
from ...activations import ACT2FN
class OlmoeMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
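The forward above is the gated (SwiGLU-style) MLP: `down_proj(act(gate_proj(x)) * up_proj(x))`. A quick standalone shape check of the same wiring; sizes are illustrative:

```python
import torch
import torch.nn.functional as F
from torch import nn

hidden, inter = 16, 32
gate = nn.Linear(hidden, inter, bias=False)
up = nn.Linear(hidden, inter, bias=False)
down = nn.Linear(inter, hidden, bias=False)

x = torch.randn(2, 5, hidden)
y = down(F.silu(gate(x)) * up(x))  # down(act(gate(x)) * up(x))
print(y.shape)                     # torch.Size([2, 5, 16])
```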
|
class OlmoeMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| total_program_units: 3
| total_doc_str: 0
| AvgCountLine: 6
| AvgCountLineBlank: 0
| AvgCountLineCode: 6
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 0
| CountClassBase: 1
| CountClassCoupled: 1
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 7
| CountDeclMethod: 2
| CountDeclMethodAll: 12
| CountLine: 14
| CountLineBlank: 1
| CountLineCode: 13
| CountLineCodeDecl: 11
| CountLineCodeExe: 10
| CountLineComment: 0
| CountStmt: 13
| CountStmtDecl: 11
| CountStmtExe: 10
| MaxCyclomatic: 1
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 2
|
4,231
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmoe/modeling_olmoe.py
|
transformers.models.olmoe.modeling_olmoe.OlmoeModel
|
from ...modeling_attn_mask_utils import AttentionMaskConverter
from typing import Optional, Union
from .configuration_olmoe import OlmoeConfig
from ...utils import auto_docstring, logging
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
import torch
from ...cache_utils import Cache, DynamicCache, StaticCache
from torch import nn
import torch.nn.functional as F
@auto_docstring
class OlmoeModel(OlmoePreTrainedModel):
def __init__(self, config: OlmoeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([OlmoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = OlmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = OlmoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, MoeModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_router_logits = output_router_logits if output_router_logits is not None else self.config.output_router_logits
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.')
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_router_logits = () if output_router_logits else None
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, output_router_logits=output_router_logits, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if output_router_logits and layer_outputs[-1] is not None:
all_router_logits += (layer_outputs[-1],)
hidden_states = self.norm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None))
return MoeModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, router_logits=all_router_logits)
def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool):
if self.config._attn_implementation == 'flash_attention_2':
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_static_cache = isinstance(past_key_values, StaticCache)
if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions):
if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
return None
dtype, device = (input_tensor.dtype, input_tensor.device)
sequence_length = input_tensor.shape[1]
if using_static_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, device=device, cache_position=cache_position, batch_size=input_tensor.shape[0])
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions):
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, **kwargs):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`; if the input `attention_mask` is already 4D, it is passed through unchanged.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with a static cache, the mask should be as long as the static cache
to account for the zero padding (the part of the cache that is not yet filled).
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
device (`torch.device`):
The device to place the 4D attention mask on.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
return causal_mask
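A small runnable illustration of the additive causal mask built in `_prepare_4d_causal_attention_mask_with_cache_position` above, using toy sizes and no padding mask (illustrative values only):

```python
import torch

dtype = torch.float32
sequence_length, target_length = 3, 5
cache_position = torch.arange(2, 5)  # three query tokens at absolute positions 2, 3, 4

min_dtype = torch.finfo(dtype).min
mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
mask = torch.triu(mask, diagonal=1)
mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
print((mask == 0).int())  # 1 marks attendable key positions per query row
```

Row i allows keys up to `cache_position[i]`, i.e. the usual causal pattern shifted for tokens already in the cache.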
|
@auto_docstring
class OlmoeModel(OlmoePreTrainedModel):
def __init__(self, config: OlmoeConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, MoeModelOutputWithPast]:
pass
def _update_causal_mask(self, attention_mask: torch.Tensor, input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool):
pass
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, cache_position: torch.Tensor, batch_size: int, **kwargs):
'''
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or, if the input `attention_mask` is already 4D, passes it through unchanged.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with a static cache, the mask should be as long as the static cache,
to account for the zero padding (the part of the cache that is not yet filled).
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
device (`torch.device`):
The device to place the 4D attention mask on.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
'''
pass
| 8
| 1
| 45
| 5
| 34
| 6
| 7
| 0.21
| 1
| 16
| 9
| 0
| 5
| 7
| 6
| 7
| 287
| 36
| 209
| 68
| 171
| 44
| 105
| 37
| 98
| 28
| 2
| 2
| 44
|
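A minimal standalone sketch of the 4D mask construction documented in the record above, run on toy sizes; the tensor names mirror the method, but nothing here is library code.

```python
import torch

# Toy sizes: 2 padded sequences, decoding 3 new tokens against 5 cached positions.
batch_size, sequence_length, target_length = 2, 3, 5
dtype = torch.float32
min_dtype = torch.finfo(dtype).min

# 2D padding mask: the second sequence has its first two positions padded out.
attention_mask = torch.tensor([[1, 1, 1, 1, 1], [0, 0, 1, 1, 1]])
cache_position = torch.arange(2, 2 + sequence_length)  # new tokens sit at positions 2, 3, 4

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1).clone()

# Fold the 2D padding mask into the causal mask, exactly as in the method above.
padding_mask = causal_mask[:, :, :, :target_length] + attention_mask[:, None, None, :]
causal_mask[:, :, :, :target_length] = causal_mask[:, :, :, :target_length].masked_fill(padding_mask == 0, min_dtype)
print(causal_mask.shape)  # torch.Size([2, 1, 3, 5])
```

Row `i` of the resulting mask allows attention up to `cache_position[i]`, with padded key positions forced back to the dtype minimum.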
4,232
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmoe/modeling_olmoe.py
|
transformers.models.olmoe.modeling_olmoe.OlmoePreTrainedModel
|
from .configuration_olmoe import OlmoeConfig
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from torch import nn
@auto_docstring
class OlmoePreTrainedModel(PreTrainedModel):
config: OlmoeConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['OlmoeDecoderLayer']
_skip_keys_device_placement = ['past_key_values']
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = False
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, OlmoeRMSNorm):
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
|
@auto_docstring
class OlmoePreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 10
| 0
| 10
| 0
| 5
| 0.05
| 1
| 0
| 0
| 2
| 1
| 0
| 1
| 1
| 22
| 1
| 21
| 13
| 19
| 1
| 20
| 13
| 18
| 5
| 1
| 2
| 5
|
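As a quick illustration of the type-dispatched initialization above, here is a hedged sketch (not the library's code path) that applies the same rules to a toy module tree via `nn.Module.apply`:

```python
import torch
from torch import nn

def init_weights(module, std=0.02):
    # Mirrors the dispatch in `_init_weights`: normal init for Linear/Embedding,
    # zeroed biases, and a zeroed embedding row at padding_idx.
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()

toy = nn.Sequential(nn.Embedding(10, 4, padding_idx=0), nn.Linear(4, 4))
toy.apply(init_weights)
print(toy[0].weight[0])  # the padding row is all zeros
```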
4,233
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmoe/modeling_olmoe.py
|
transformers.models.olmoe.modeling_olmoe.OlmoeRMSNorm
|
import torch.nn.functional as F
import torch
from torch import nn
class OlmoeRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
"""
OlmoeRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
|
class OlmoeRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-05):
'''
OlmoeRMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 4
| 1
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 2
| 0
| 0
| 3
| 2
| 3
| 13
| 18
| 2
| 13
| 8
| 9
| 3
| 13
| 8
| 9
| 1
| 1
| 0
| 3
|
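A numeric sanity check for the RMS normalization above (an illustration, not library code): with a unit weight, every position ends up with unit root-mean-square, and unlike LayerNorm there is no mean subtraction and no bias.

```python
import torch

eps = 1e-5
hidden_states = torch.randn(2, 3, 8)
variance = hidden_states.float().pow(2).mean(-1, keepdim=True)
normed = hidden_states.float() * torch.rsqrt(variance + eps)
print(normed.pow(2).mean(-1))  # ~1.0 at every position
```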
4,234
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmoe/modeling_olmoe.py
|
transformers.models.olmoe.modeling_olmoe.OlmoeRotaryEmbedding
|
from .configuration_olmoe import OlmoeConfig
import torch
import torch.nn.functional as F
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from torch import nn
class OlmoeRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: OlmoeConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
|
class OlmoeRotaryEmbedding(nn.Module):
def __init__(self, config: OlmoeConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5
| 0
| 18
| 2
| 13
| 5
| 3
| 0.35
| 1
| 4
| 1
| 0
| 3
| 7
| 3
| 13
| 59
| 8
| 40
| 21
| 35
| 14
| 38
| 20
| 34
| 3
| 1
| 1
| 8
|
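For the default `rope_type` with `attention_scaling = 1.0`, the cos/sin construction above reduces to the sketch below; `head_dim=8` and `base=10000` are illustrative assumptions, not values read from any config.

```python
import torch

head_dim, base = 8, 10000.0
inv_freq = 1.0 / base ** (torch.arange(0, head_dim, 2).float() / head_dim)  # (head_dim/2,)
position_ids = torch.arange(6)[None, :]                                    # (batch=1, seq=6)

inv_freq_expanded = inv_freq[None, :, None].float().expand(1, -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
freqs = (inv_freq_expanded @ position_ids_expanded).transpose(1, 2)  # (1, 6, head_dim/2)
emb = torch.cat((freqs, freqs), dim=-1)                              # (1, 6, head_dim)
cos, sin = emb.cos(), emb.sin()
print(cos.shape, sin.shape)  # torch.Size([1, 6, 8]) twice
```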
4,235
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmoe/modeling_olmoe.py
|
transformers.models.olmoe.modeling_olmoe.OlmoeSdpaAttention
|
from ...cache_utils import Cache, DynamicCache, StaticCache
import torch
from ...utils.deprecation import deprecate_kwarg
import torch.nn.functional as F
from typing import Optional, Union
class OlmoeSdpaAttention(OlmoeAttention):
"""
OLMoE attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`OlmoeAttention` as the weights of the module stay untouched. The only changes are in the forward pass, to adapt to
the SDPA API.
"""
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
if output_attentions:
logger.warning_once('OlmoeModel is using OlmoeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.')
return super().forward(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings)
bsz, q_len, _ = hidden_states.size()
query_states = self.q_norm(self.q_proj(hidden_states))
key_states = self.k_norm(self.k_proj(hidden_states))
value_states = self.v_proj(hidden_states)
if self.config.clip_qkv is not None:
query_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
key_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
value_states.clamp_(min=-self.config.clip_qkv, max=self.config.clip_qkv)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = causal_mask[:, :, :, :key_states.shape[-2]]
if query_states.device.type == 'cuda' and causal_mask is not None:
query_states = query_states.contiguous()
key_states = key_states.contiguous()
value_states = value_states.contiguous()
is_causal = causal_mask is None and q_len > 1
attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, self.hidden_size)
attn_output = self.o_proj(attn_output)
return (attn_output, None)
|
class OlmoeSdpaAttention(OlmoeAttention):
'''
OLMoE attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`OlmoeAttention` as the weights of the module stay untouched. The only changes are in the forward pass, to adapt to
the SDPA API.
'''
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
| 3
| 1
| 85
| 14
| 64
| 7
| 8
| 0.2
| 1
| 4
| 1
| 0
| 1
| 0
| 1
| 13
| 93
| 15
| 65
| 21
| 53
| 13
| 36
| 11
| 34
| 8
| 2
| 1
| 8
|
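A hedged sketch of the SDPA call above with grouped-query attention: key/value heads are repeated to match the query heads (what `repeat_kv` does), then handed to `scaled_dot_product_attention`. `is_causal=True` stands in for the 4D mask in the unpadded case; all shapes are toy values.

```python
import torch
import torch.nn.functional as F

bsz, q_len, num_heads, num_kv_heads, head_dim = 2, 5, 8, 2, 16
query = torch.randn(bsz, num_heads, q_len, head_dim)
key = torch.randn(bsz, num_kv_heads, q_len, head_dim)
value = torch.randn(bsz, num_kv_heads, q_len, head_dim)

n_rep = num_heads // num_kv_heads  # self.num_key_value_groups in the module above
key = key[:, :, None].expand(bsz, num_kv_heads, n_rep, q_len, head_dim).reshape(bsz, num_heads, q_len, head_dim)
value = value[:, :, None].expand(bsz, num_kv_heads, n_rep, q_len, head_dim).reshape(bsz, num_heads, q_len, head_dim)

attn_output = F.scaled_dot_product_attention(query, key, value, is_causal=True)
attn_output = attn_output.transpose(1, 2).reshape(bsz, q_len, num_heads * head_dim)
print(attn_output.shape)  # torch.Size([2, 5, 128])
```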
4,236
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/olmoe/modeling_olmoe.py
|
transformers.models.olmoe.modeling_olmoe.OlmoeSparseMoeBlock
|
import torch.nn.functional as F
from torch import nn
import torch
class OlmoeSparseMoeBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.num_experts = config.num_experts
self.top_k = config.num_experts_per_tok
self.norm_topk_prob = config.norm_topk_prob
self.gate = nn.Linear(config.hidden_size, self.num_experts, bias=False)
self.experts = nn.ModuleList([OlmoeMLP(config) for _ in range(self.num_experts)])
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size, sequence_length, hidden_dim = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_dim)
router_logits = self.gate(hidden_states)
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
if self.norm_topk_prob:
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
routing_weights = routing_weights.to(hidden_states.dtype)
final_hidden_states = torch.zeros((batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
for expert_idx in range(self.num_experts):
expert_layer = self.experts[expert_idx]
idx, top_x = torch.where(expert_mask[expert_idx])
current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
return (final_hidden_states, router_logits)
|
class OlmoeSparseMoeBlock(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 22
| 3
| 14
| 5
| 2
| 0.34
| 1
| 5
| 1
| 0
| 2
| 5
| 2
| 12
| 46
| 7
| 29
| 19
| 26
| 10
| 27
| 19
| 24
| 3
| 1
| 1
| 4
|
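The routing in the forward pass above boils down to the sketch below (toy sizes, randomly initialized gate): softmax over expert logits, top-k selection with renormalization (`norm_topk_prob=True`), and a one-hot dispatch mask telling each expert which tokens it serves.

```python
import torch
import torch.nn.functional as F

num_experts, top_k, num_tokens, hidden_dim = 4, 2, 6, 8
hidden_states = torch.randn(num_tokens, hidden_dim)
gate = torch.nn.Linear(hidden_dim, num_experts, bias=False)

routing_weights = F.softmax(gate(hidden_states), dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)  # renormalize over the top-k

expert_mask = F.one_hot(selected_experts, num_classes=num_experts).permute(2, 1, 0)
for expert_idx in range(num_experts):
    idx, top_x = torch.where(expert_mask[expert_idx])
    # top_x: which tokens route to this expert; idx: which top-k slot chose it.
    print(expert_idx, top_x.tolist())
```

Dispatching per expert with `index_add_`, as the real forward does, keeps each expert's GEMM dense over only the tokens routed to it.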
4,237
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/configuration_omdet_turbo.py
|
transformers.models.omdet_turbo.configuration_omdet_turbo.OmDetTurboConfig
|
from ...utils.backbone_utils import verify_backbone_config_arguments
from ..auto import CONFIG_MAPPING
from ...configuration_utils import PretrainedConfig
class OmDetTurboConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`OmDetTurboForObjectDetection`].
It is used to instantiate an OmDet-Turbo model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the OmDet-Turbo
[omlab/omdet-turbo-swin-tiny-hf](https://huggingface.co/omlab/omdet-turbo-swin-tiny-hf) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`PretrainedConfig`, *optional*):
The configuration of the text backbone.
backbone_config (`PretrainedConfig`, *optional*):
The configuration of the vision backbone.
use_timm_backbone (`bool`, *optional*, defaults to `True`):
Whether to use the timm library for the vision backbone.
backbone (`str`, *optional*, defaults to `"swin_tiny_patch4_window7_224"`):
The name of the pretrained vision backbone to use. If `use_pretrained_backbone=False`, a randomly initialized
backbone with the same `backbone` architecture is used.
backbone_kwargs (`dict`, *optional*):
Additional kwargs for the vision backbone.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use a pretrained vision backbone.
apply_layernorm_after_vision_backbone (`bool`, *optional*, defaults to `True`):
Whether to apply layer normalization on the feature maps of the vision backbone output.
image_size (`int`, *optional*, defaults to 640):
The size (resolution) of each image.
disable_custom_kernels (`bool`, *optional*, defaults to `False`):
Whether to disable custom kernels.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon value for layer normalization.
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon value for batch normalization.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
text_projection_in_dim (`int`, *optional*, defaults to 512):
The input dimension for the text projection.
text_projection_out_dim (`int`, *optional*, defaults to 512):
The output dimension for the text projection.
task_encoder_hidden_dim (`int`, *optional*, defaults to 1024):
The feedforward dimension for the task encoder.
class_embed_dim (`int`, *optional*, defaults to 512):
The dimension of the classes embeddings.
class_distance_type (`str`, *optional*, defaults to `"cosine"`):
The type of distance used to compare predicted classes to projected class embeddings.
Can be `"cosine"` or `"dot"`.
num_queries (`int`, *optional*, defaults to 900):
The number of queries.
csp_activation (`str`, *optional*, defaults to `"silu"`):
The activation function of the Cross Stage Partial (CSP) networks of the encoder.
conv_norm_activation (`str`, *optional*, defaults to `"gelu"`):
The activation function of the ConvNormLayer layers of the encoder.
encoder_feedforward_activation (`str`, *optional*, defaults to `"relu"`):
The activation function for the feedforward network of the encoder.
encoder_feedforward_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate following the activation of the encoder feedforward network.
encoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate of the encoder multi-head attention module.
hidden_expansion (`int`, *optional*, defaults to 1):
The hidden expansion of the CSP networks in the encoder.
vision_features_channels (`tuple(int)`, *optional*, defaults to `[256, 256, 256]`):
The projected vision features channels used as inputs for the decoder.
encoder_hidden_dim (`int`, *optional*, defaults to 256):
The hidden dimension of the encoder.
encoder_in_channels (`List(int)`, *optional*, defaults to `[192, 384, 768]`):
The input channels for the encoder.
encoder_projection_indices (`List(int)`, *optional*, defaults to `[2]`):
The indices of the input features projected by each layer.
encoder_attention_heads (`int`, *optional*, defaults to 8):
The number of attention heads for the encoder.
encoder_dim_feedforward (`int`, *optional*, defaults to 2048):
The feedforward dimension for the encoder.
encoder_layers (`int`, *optional*, defaults to 1):
The number of layers in the encoder.
positional_encoding_temperature (`int`, *optional*, defaults to 10000):
The positional encoding temperature in the encoder.
num_feature_levels (`int`, *optional*, defaults to 3):
The number of feature levels for the multi-scale deformable attention module of the decoder.
decoder_hidden_dim (`int`, *optional*, defaults to 256):
The hidden dimension of the decoder.
decoder_num_heads (`int`, *optional*, defaults to 8):
The number of heads for the decoder.
decoder_num_layers (`int`, *optional*, defaults to 6):
The number of layers for the decoder.
decoder_activation (`str`, *optional*, defaults to `"relu"`):
The activation function for the decoder.
decoder_dim_feedforward (`int`, *optional*, defaults to 2048):
The feedforward dimension for the decoder.
decoder_num_points (`int`, *optional*, defaults to 4):
The number of points sampled in the decoder multi-scale deformable attention module.
decoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate for the decoder.
eval_size (`tuple[int, int]`, *optional*):
Height and width used to compute the effective height and width of the position embeddings after taking
into account the stride (see RTDetr).
learn_initial_query (`bool`, *optional*, defaults to `False`):
Whether to learn the initial query.
cache_size (`int`, *optional*, defaults to 100):
The cache size for the classes and prompts caches.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder-decoder model or not.
kwargs (`dict[str, Any]`, *optional*):
Additional parameters from the architecture. The values in kwargs will be saved as part of the configuration
and can be used to control the model outputs.
Examples:
```python
>>> from transformers import OmDetTurboConfig, OmDetTurboForObjectDetection
>>> # Initializing a OmDet-Turbo omlab/omdet-turbo-swin-tiny-hf style configuration
>>> configuration = OmDetTurboConfig()
>>> # Initializing a model (with random weights) from the omlab/omdet-turbo-swin-tiny-hf style configuration
>>> model = OmDetTurboForObjectDetection(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'omdet-turbo'
attribute_map = {'encoder_hidden_dim': 'd_model', 'num_attention_heads': 'encoder_attention_heads'}
def __init__(self, text_config=None, backbone_config=None, use_timm_backbone=True, backbone='swin_tiny_patch4_window7_224', backbone_kwargs=None, use_pretrained_backbone=False, apply_layernorm_after_vision_backbone=True, image_size=640, disable_custom_kernels=False, layer_norm_eps=1e-05, batch_norm_eps=1e-05, init_std=0.02, text_projection_in_dim=512, text_projection_out_dim=512, task_encoder_hidden_dim=1024, class_embed_dim=512, class_distance_type='cosine', num_queries=900, csp_activation='silu', conv_norm_activation='gelu', encoder_feedforward_activation='relu', encoder_feedforward_dropout=0.0, encoder_dropout=0.0, hidden_expansion=1, vision_features_channels=[256, 256, 256], encoder_hidden_dim=256, encoder_in_channels=[192, 384, 768], encoder_projection_indices=[2], encoder_attention_heads=8, encoder_dim_feedforward=2048, encoder_layers=1, positional_encoding_temperature=10000, num_feature_levels=3, decoder_hidden_dim=256, decoder_num_heads=8, decoder_num_layers=6, decoder_activation='relu', decoder_dim_feedforward=2048, decoder_num_points=4, decoder_dropout=0.0, eval_size=None, learn_initial_query=False, cache_size=100, is_encoder_decoder=True, **kwargs):
if use_timm_backbone:
if backbone_config is None:
backbone_kwargs = {'out_indices': [1, 2, 3], 'img_size': image_size, 'always_partition': True}
elif backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `swin` vision config.')
backbone_config = CONFIG_MAPPING['swin'](window_size=7, image_size=image_size, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], out_indices=[2, 3, 4])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get('model_type')
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)
if text_config is None:
logger.info('`text_config` is `None`. Initializing the config with the default `clip_text_model` text config.')
text_config = CONFIG_MAPPING['clip_text_model']()
elif isinstance(text_config, dict):
text_model_type = text_config.get('model_type')
text_config = CONFIG_MAPPING[text_model_type](**text_config)
if class_distance_type not in ['cosine', 'dot']:
raise ValueError(f'Invalid `class_distance_type`. It should be either `cosine` or `dot`, but got {class_distance_type}.')
self.text_config = text_config
self.backbone_config = backbone_config
self.use_timm_backbone = use_timm_backbone
self.backbone = backbone
self.backbone_kwargs = backbone_kwargs
self.use_pretrained_backbone = use_pretrained_backbone
self.apply_layernorm_after_vision_backbone = apply_layernorm_after_vision_backbone
self.image_size = image_size
self.disable_custom_kernels = disable_custom_kernels
self.layer_norm_eps = layer_norm_eps
self.batch_norm_eps = batch_norm_eps
self.init_std = init_std
self.text_projection_in_dim = text_projection_in_dim
self.text_projection_out_dim = text_projection_out_dim
self.task_encoder_hidden_dim = task_encoder_hidden_dim
self.class_embed_dim = class_embed_dim
self.class_distance_type = class_distance_type
self.num_queries = num_queries
self.csp_activation = csp_activation
self.conv_norm_activation = conv_norm_activation
self.encoder_feedforward_activation = encoder_feedforward_activation
self.encoder_feedforward_dropout = encoder_feedforward_dropout
self.encoder_dropout = encoder_dropout
self.hidden_expansion = hidden_expansion
self.vision_features_channels = vision_features_channels
self.encoder_hidden_dim = encoder_hidden_dim
self.encoder_in_channels = encoder_in_channels
self.encoder_projection_indices = encoder_projection_indices
self.encoder_attention_heads = encoder_attention_heads
self.encoder_dim_feedforward = encoder_dim_feedforward
self.encoder_layers = encoder_layers
self.positional_encoding_temperature = positional_encoding_temperature
self.num_feature_levels = num_feature_levels
self.decoder_hidden_dim = decoder_hidden_dim
self.decoder_num_heads = decoder_num_heads
self.decoder_num_layers = decoder_num_layers
self.decoder_activation = decoder_activation
self.decoder_dim_feedforward = decoder_dim_feedforward
self.decoder_num_points = decoder_num_points
self.decoder_dropout = decoder_dropout
self.eval_size = eval_size
self.learn_initial_query = learn_initial_query
self.cache_size = cache_size
self.is_encoder_decoder = is_encoder_decoder
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def sub_configs(self):
sub_configs = {}
backbone_config = getattr(self, 'backbone_config', None)
text_config = getattr(self, 'text_config', None)
if isinstance(backbone_config, PretrainedConfig):
sub_configs['backbone_config'] = type(backbone_config)
if isinstance(text_config, PretrainedConfig):
sub_configs['text_config'] = type(text_config)
return sub_configs
|
class OmDetTurboConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`OmDetTurboForObjectDetection`].
It is used to instantiate an OmDet-Turbo model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the OmDet-Turbo
[omlab/omdet-turbo-swin-tiny-hf](https://huggingface.co/omlab/omdet-turbo-swin-tiny-hf) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`PretrainedConfig`, *optional*):
The configuration of the text backbone.
backbone_config (`PretrainedConfig`, *optional*):
The configuration of the vision backbone.
use_timm_backbone (`bool`, *optional*, defaults to `True`):
Whether to use the timm library for the vision backbone.
backbone (`str`, *optional*, defaults to `"swin_tiny_patch4_window7_224"`):
The name of the pretrained vision backbone to use. If `use_pretrained_backbone=False`, a randomly initialized
backbone with the same `backbone` architecture is used.
backbone_kwargs (`dict`, *optional*):
Additional kwargs for the vision backbone.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use a pretrained vision backbone.
apply_layernorm_after_vision_backbone (`bool`, *optional*, defaults to `True`):
Whether to apply layer normalization on the feature maps of the vision backbone output.
image_size (`int`, *optional*, defaults to 640):
The size (resolution) of each image.
disable_custom_kernels (`bool`, *optional*, defaults to `False`):
Whether to disable custom kernels.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon value for layer normalization.
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon value for batch normalization.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
text_projection_in_dim (`int`, *optional*, defaults to 512):
The input dimension for the text projection.
text_projection_out_dim (`int`, *optional*, defaults to 512):
The output dimension for the text projection.
task_encoder_hidden_dim (`int`, *optional*, defaults to 1024):
The feedforward dimension for the task encoder.
class_embed_dim (`int`, *optional*, defaults to 512):
The dimension of the classes embeddings.
class_distance_type (`str`, *optional*, defaults to `"cosine"`):
The type of distance used to compare predicted classes to projected class embeddings.
Can be `"cosine"` or `"dot"`.
num_queries (`int`, *optional*, defaults to 900):
The number of queries.
csp_activation (`str`, *optional*, defaults to `"silu"`):
The activation function of the Cross Stage Partial (CSP) networks of the encoder.
conv_norm_activation (`str`, *optional*, defaults to `"gelu"`):
The activation function of the ConvNormLayer layers of the encoder.
encoder_feedforward_activation (`str`, *optional*, defaults to `"relu"`):
The activation function for the feedforward network of the encoder.
encoder_feedforward_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate following the activation of the encoder feedforward network.
encoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate of the encoder multi-head attention module.
hidden_expansion (`int`, *optional*, defaults to 1):
The hidden expansion of the CSP networks in the encoder.
vision_features_channels (`tuple(int)`, *optional*, defaults to `[256, 256, 256]`):
The projected vision features channels used as inputs for the decoder.
encoder_hidden_dim (`int`, *optional*, defaults to 256):
The hidden dimension of the encoder.
encoder_in_channels (`List(int)`, *optional*, defaults to `[192, 384, 768]`):
The input channels for the encoder.
encoder_projection_indices (`List(int)`, *optional*, defaults to `[2]`):
The indices of the input features projected by each layer.
encoder_attention_heads (`int`, *optional*, defaults to 8):
The number of attention heads for the encoder.
encoder_dim_feedforward (`int`, *optional*, defaults to 2048):
The feedforward dimension for the encoder.
encoder_layers (`int`, *optional*, defaults to 1):
The number of layers in the encoder.
positional_encoding_temperature (`int`, *optional*, defaults to 10000):
The positional encoding temperature in the encoder.
num_feature_levels (`int`, *optional*, defaults to 3):
The number of feature levels for the multi-scale deformable attention module of the decoder.
decoder_hidden_dim (`int`, *optional*, defaults to 256):
The hidden dimension of the decoder.
decoder_num_heads (`int`, *optional*, defaults to 8):
The number of heads for the decoder.
decoder_num_layers (`int`, *optional*, defaults to 6):
The number of layers for the decoder.
decoder_activation (`str`, *optional*, defaults to `"relu"`):
The activation function for the decoder.
decoder_dim_feedforward (`int`, *optional*, defaults to 2048):
The feedforward dimension for the decoder.
decoder_num_points (`int`, *optional*, defaults to 4):
The number of points sampled in the decoder multi-scale deformable attention module.
decoder_dropout (`float`, *optional*, defaults to 0.0):
The dropout rate for the decoder.
eval_size (`tuple[int, int]`, *optional*):
Height and width used to compute the effective height and width of the position embeddings after taking
into account the stride (see RTDetr).
learn_initial_query (`bool`, *optional*, defaults to `False`):
Whether to learn the initial query.
cache_size (`int`, *optional*, defaults to 100):
The cache size for the classes and prompts caches.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder-decoder model or not.
kwargs (`dict[str, Any]`, *optional*):
Additional parameters from the architecture. The values in kwargs will be saved as part of the configuration
and can be used to control the model outputs.
Examples:
```python
>>> from transformers import OmDetTurboConfig, OmDetTurboForObjectDetection
>>> # Initializing a OmDet-Turbo omlab/omdet-turbo-swin-tiny-hf style configuration
>>> configuration = OmDetTurboConfig()
>>> # Initializing a model (with random weights) from the omlab/omdet-turbo-swin-tiny-hf style configuration
>>> model = OmDetTurboForObjectDetection(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, text_config=None, backbone_config=None, use_timm_backbone=True, backbone='swin_tiny_patch4_window7_224', backbone_kwargs=None, use_pretrained_backbone=False, apply_layernorm_after_vision_backbone=True, image_size=640, disable_custom_kernels=False, layer_norm_eps=1e-05, batch_norm_eps=1e-05, init_std=0.02, text_projection_in_dim=512, text_projection_out_dim=512, task_encoder_hidden_dim=1024, class_embed_dim=512, class_distance_type='cosine', num_queries=900, csp_activation='silu', conv_norm_activation='gelu', encoder_feedforward_activation='relu', encoder_feedforward_dropout=0.0, encoder_dropout=0.0, hidden_expansion=1, vision_features_channels=[256, 256, 256], encoder_hidden_dim=256, encoder_in_channels=[192, 384, 768], encoder_projection_indices=[2], encoder_attention_heads=8, encoder_dim_feedforward=2048, encoder_layers=1, positional_encoding_temperature=10000, num_feature_levels=3, decoder_hidden_dim=256, decoder_num_heads=8, decoder_num_layers=6, decoder_activation='relu', decoder_dim_feedforward=2048, decoder_num_points=4, decoder_dropout=0.0, eval_size=None, learn_initial_query=False, cache_size=100, is_encoder_decoder=True, **kwargs):
pass
@property
def sub_configs(self):
pass
| 4
| 1
| 138
| 5
| 133
| 0
| 8
| 0.81
| 1
| 3
| 0
| 0
| 1
| 44
| 1
| 1
| 265
| 14
| 139
| 98
| 90
| 112
| 65
| 51
| 63
| 8
| 1
| 2
| 8
|
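A short usage sketch for the configuration above, assuming a `transformers` install that ships `OmDetTurboConfig`; the overridden values are arbitrary illustrations.

```python
from transformers import OmDetTurboConfig

config = OmDetTurboConfig(decoder_num_layers=4, num_queries=300)
print(config.decoder_num_layers, config.num_queries)  # 4 300
print(config.sub_configs)  # includes 'text_config' once the default CLIP text config is set

# `class_distance_type` is validated in __init__, as shown above.
try:
    OmDetTurboConfig(class_distance_type="euclidean")
except ValueError as err:
    print(err)
```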
4,238
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboCSPRepLayer
|
from torch import Tensor, nn
from .configuration_omdet_turbo import OmDetTurboConfig
class OmDetTurboCSPRepLayer(nn.Module):
"""
Cross Stage Partial (CSP) network layer with RepVGG blocks.
"""
def __init__(self, config: OmDetTurboConfig):
super().__init__()
in_channels = config.encoder_hidden_dim * 2
out_channels = config.encoder_hidden_dim
num_blocks = 3
activation = config.csp_activation
hidden_channels = int(out_channels * config.hidden_expansion)
self.conv1 = OmDetTurboConvNormLayer(config, in_channels, hidden_channels, 1, 1, activation=activation)
self.conv2 = OmDetTurboConvNormLayer(config, in_channels, hidden_channels, 1, 1, activation=activation)
self.bottlenecks = nn.Sequential(*[OmDetTurboRepVggBlock(config) for _ in range(num_blocks)])
if hidden_channels != out_channels:
self.conv3 = OmDetTurboConvNormLayer(config, hidden_channels, out_channels, 1, 1, activation=activation)
else:
self.conv3 = nn.Identity()
def forward(self, hidden_state):
hidden_state_1 = self.conv1(hidden_state)
hidden_state_1 = self.bottlenecks(hidden_state_1)
hidden_state_2 = self.conv2(hidden_state)
return self.conv3(hidden_state_1 + hidden_state_2)
|
class OmDetTurboCSPRepLayer(nn.Module):
'''
Cross Stage Partial (CSP) network layer with RepVGG blocks.
'''
def __init__(self, config: OmDetTurboConfig):
pass
def forward(self, hidden_state):
pass
| 3
| 1
| 11
| 1
| 10
| 0
| 2
| 0.14
| 1
| 6
| 3
| 0
| 2
| 4
| 2
| 12
| 28
| 4
| 21
| 15
| 18
| 3
| 20
| 15
| 17
| 2
| 1
| 1
| 3
|
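For intuition, a self-contained sketch of the CSP split-transform-merge pattern implemented above; plain `nn.Conv2d` layers stand in for `OmDetTurboConvNormLayer` and the RepVGG bottlenecks, and the channel sizes are illustrative.

```python
import torch
from torch import nn

class ToyCSPLayer(nn.Module):
    def __init__(self, in_channels=512, out_channels=256, hidden_expansion=1):
        super().__init__()
        hidden = int(out_channels * hidden_expansion)
        self.conv1 = nn.Conv2d(in_channels, hidden, 1)  # branch that goes through the bottlenecks
        self.conv2 = nn.Conv2d(in_channels, hidden, 1)  # shortcut branch
        self.bottlenecks = nn.Sequential(nn.Conv2d(hidden, hidden, 3, padding=1))
        self.conv3 = nn.Identity() if hidden == out_channels else nn.Conv2d(hidden, out_channels, 1)

    def forward(self, hidden_state):
        return self.conv3(self.bottlenecks(self.conv1(hidden_state)) + self.conv2(hidden_state))

print(ToyCSPLayer()(torch.randn(1, 512, 20, 20)).shape)  # torch.Size([1, 256, 20, 20])
```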
4,239
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboConvNormLayer
|
from ...activations import ACT2CLS, ACT2FN
from torch import Tensor, nn
class OmDetTurboConvNormLayer(nn.Module):
def __init__(self, config, in_channels, out_channels, kernel_size, stride, padding=None, activation=None):
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=(kernel_size - 1) // 2 if padding is None else padding, bias=False)
self.norm = nn.BatchNorm2d(out_channels, config.batch_norm_eps)
self.activation = nn.Identity() if activation is None else ACT2CLS[activation]()
def forward(self, hidden_state):
hidden_state = self.conv(hidden_state)
hidden_state = self.norm(hidden_state)
hidden_state = self.activation(hidden_state)
return hidden_state
|
class OmDetTurboConvNormLayer(nn.Module):
def __init__(self, config, in_channels, out_channels, kernel_size, stride, padding=None, activation=None):
pass
def forward(self, hidden_state):
pass
| 3
| 0
| 9
| 0
| 9
| 0
| 2
| 0
| 1
| 1
| 0
| 0
| 2
| 3
| 2
| 12
| 19
| 1
| 18
| 6
| 15
| 0
| 11
| 6
| 8
| 3
| 1
| 0
| 4
|
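A quick check of the default padding rule above: `(kernel_size - 1) // 2` preserves the spatial size for odd kernels at stride 1.

```python
import torch
from torch import nn

for kernel_size in (1, 3, 5):
    conv = nn.Conv2d(8, 8, kernel_size, stride=1, padding=(kernel_size - 1) // 2, bias=False)
    print(kernel_size, conv(torch.randn(1, 8, 32, 32)).shape)  # spatial size stays 32x32
```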
4,240
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboDecoder
|
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
import torch.nn.functional as F
from .configuration_omdet_turbo import OmDetTurboConfig
import torch
from functools import lru_cache
from torch import Tensor, nn
class OmDetTurboDecoder(OmDetTurboPreTrainedModel):
def __init__(self, config: OmDetTurboConfig):
self.config = config
super().__init__(config)
self.gradient_checkpointing = False
hidden_dim = config.decoder_hidden_dim
self.num_queries = config.num_queries
self.class_distance_type = config.class_distance_type
self.learn_initial_query = config.learn_initial_query
self.channel_projection_layers = nn.ModuleList((nn.Sequential(nn.Conv2d(x, hidden_dim, 1, bias=False), nn.BatchNorm2d(hidden_dim)) for x in config.vision_features_channels))
self.task_encoder = OmDetTurboTaskEncoder(config)
self.task_project = None
if config.class_embed_dim != hidden_dim:
self.task_project = nn.Linear(config.class_embed_dim, hidden_dim)
self.layers = nn.ModuleList([OmDetTurboDeformableTransformerDecoderLayer(config) for _ in range(config.decoder_num_layers)])
self.decoder_num_layers = config.decoder_num_layers
if self.learn_initial_query:
self.tgt_embed = nn.Embedding(self.num_queries, hidden_dim)
self.query_position_head = OmDetTurboMLP(input_dim=4, hidden_dim=2 * hidden_dim, output_dim=hidden_dim, num_layers=2)
self.encoder_vision_features = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.LayerNorm(hidden_dim, eps=config.layer_norm_eps))
self.encoder_class_head = nn.Linear(config.class_embed_dim, hidden_dim)
self.encoder_bbox_head = OmDetTurboMLP(input_dim=hidden_dim, hidden_dim=hidden_dim, output_dim=4, num_layers=3)
self.decoder_class_head = nn.ModuleList([nn.Linear(config.class_embed_dim, hidden_dim) for _ in range(config.decoder_num_layers)])
self.decoder_bbox_head = nn.ModuleList([OmDetTurboMLP(hidden_dim, hidden_dim, 4, num_layers=3) for _ in range(config.decoder_num_layers)])
self.post_init()
@lru_cache(maxsize=32)
def generate_anchors(self, spatial_shapes=None, grid_size=0.05, device='cpu', dtype=torch.float32):
if spatial_shapes is None:
raise ValueError('spatial_shapes must be provided')
anchors = []
for level, (height, width) in enumerate(spatial_shapes):
grid_y, grid_x = torch.meshgrid(torch.arange(end=height, dtype=dtype, device=device), torch.arange(end=width, dtype=dtype, device=device), indexing='ij')
grid_xy = torch.stack([grid_x, grid_y], -1)
valid_wh = torch.tensor([width, height], dtype=dtype, device=device)
grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_wh
wh = torch.ones_like(grid_xy, dtype=dtype, device=device) * grid_size * 2.0 ** level
anchors.append(torch.concat([grid_xy, wh], -1).reshape(-1, height * width, 4))
eps = 0.01
anchors = torch.concat(anchors, 1)
valid_mask = ((anchors > eps) * (anchors < 1 - eps)).all(-1, keepdim=True)
anchors = torch.log(anchors / (1 - anchors))
anchors = torch.where(valid_mask, anchors, torch.inf)
return (anchors, valid_mask)
def _get_encoder_input(self, vision_features):
vision_features = [self.channel_projection_layers[i](feat) for i, feat in enumerate(vision_features)]
new_vision_features = []
new_vision_shapes_list = []
for feat in vision_features:
height, width = feat.shape[2:]
new_vision_features.append(feat.flatten(2).permute(0, 2, 1))
new_vision_shapes_list.append((height, width))
new_vision_features = torch.cat(new_vision_features, 1)
new_vision_shapes = torch.tensor(new_vision_shapes_list, dtype=torch.int64, device=vision_features[0].device)
level_start_index = torch.cat((new_vision_shapes.new_zeros((1,)), new_vision_shapes.prod(1).cumsum(0)[:-1]))
return (new_vision_features, new_vision_shapes, new_vision_shapes_list, level_start_index)
def _get_decoder_input(self, vision_features, vision_shapes, class_features, denoise_embeddings=None, denoise_bboxes=None):
batch_size = len(vision_features)
anchors, valid_mask = self.generate_anchors(vision_shapes, device=vision_features.device, dtype=vision_features.dtype)
predicted_class_features = self.encoder_vision_features(torch.where(valid_mask, vision_features, torch.tensor(0.0, dtype=vision_features.dtype, device=vision_features.device)))
original_class_projected = self.encoder_class_head(class_features).permute(1, 2, 0)
encoder_class_similarity = get_class_similarity(self.class_distance_type, predicted_class_features, original_class_projected)
encoder_outputs_bboxes = self.encoder_bbox_head(predicted_class_features) + anchors
topk_ind = torch.topk(encoder_class_similarity.max(-1).values, self.num_queries, dim=1).indices.view(-1)
batch_ind = torch.arange(end=batch_size, dtype=topk_ind.dtype, device=topk_ind.device).unsqueeze(-1).repeat(1, self.num_queries).view(-1)
reference_points = encoder_outputs_bboxes[batch_ind, topk_ind].view(batch_size, self.num_queries, -1)
encoder_bboxes = reference_points.sigmoid()
if denoise_bboxes is not None:
reference_points = torch.cat([denoise_bboxes, reference_points], 1)
if self.training:
reference_points = reference_points.detach()
encoder_class_similarity = encoder_class_similarity[batch_ind, topk_ind].view(batch_size, self.num_queries, -1)
if self.learn_initial_query:
embeddings = self.tgt_embed.weight.unsqueeze(0).repeat(batch_size, 1, 1)
else:
embeddings = predicted_class_features[batch_ind, topk_ind].view(batch_size, self.num_queries, -1)
if self.training:
embeddings = embeddings.detach()
if denoise_embeddings is not None:
embeddings = torch.cat([denoise_embeddings, embeddings], 1)
return (embeddings, reference_points, encoder_bboxes, encoder_class_similarity, anchors)
def forward(self, vision_features, class_features, task_features, task_mask, output_attentions=None, output_hidden_states=None, return_dict=None):
"""
Args:
vision_features (`torch.FloatTensor`): The sequence of vision features; shape depends on the vision
backbone.
class_features (`torch.FloatTensor`): The sequence of class features of shape
`(class_sequence_length, batch_size, class_embed_dim)`.
task_features (`torch.FloatTensor`): The sequence of task features of shape
`(task_sequence_length, batch_size, decoder_hidden_dim)`.
task_mask (`torch.LongTensor`): The mask for the task features of shape `(batch_size, task_sequence_length)`.
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention
layers. See `attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See
`hidden_states` under returned tensors for more detail.
return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain
tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_features, vision_shapes, vision_shapes_list, level_start_index = self._get_encoder_input(vision_features)
denoise_embeddings, denoise_bboxes, key_padding_mask = (None, None, None)
batch_size = task_mask.shape[0]
task_features = self.task_encoder(task_features)
if self.task_project is not None:
task_features = self.task_project(task_features)
src_key_mask = (task_mask == 0).detach()
attn_mask_len = self.num_queries
fusion_size = attn_mask_len + task_features.shape[0]
key_padding_mask = torch.zeros([batch_size, fusion_size], dtype=torch.bool).to(task_features.device)
key_padding_mask[:, attn_mask_len:] = src_key_mask
attention_mask = _prepare_4d_attention_mask(~key_padding_mask, dtype=vision_features.dtype)
decoder_embeddings, reference_points, encoder_bboxes, encoder_class_similarity, init_reference_points = self._get_decoder_input(vision_features, tuple(vision_shapes_list), class_features, denoise_embeddings, denoise_bboxes)
all_hidden_states = () if output_hidden_states else None
all_attns = () if output_attentions else None
all_self_attns = () if output_attentions else None
all_cross_attns = () if output_attentions else None
predicted_class_features = decoder_embeddings
if output_hidden_states:
all_hidden_states = all_hidden_states + (predicted_class_features,)
decoder_bboxes = []
decoder_classes = []
last_refined_bbox = None
reference_points = reference_points.sigmoid()
for i, layer in enumerate(self.layers):
predicted_class_features, task_features, self_attention, cross_attention = layer(predicted_class_features, task_features, reference_points, vision_features, vision_shapes, vision_shapes_list, level_start_index=level_start_index, attention_mask=attention_mask, query_position=self.query_position_head(reference_points), output_attentions=output_attentions, output_hidden_states=output_hidden_states)
if output_attentions:
all_self_attns = all_self_attns + (self_attention,)
all_cross_attns = all_cross_attns + (cross_attention,)
if output_hidden_states:
all_hidden_states = all_hidden_states + (predicted_class_features,)
refined_bbox = torch.sigmoid(self.decoder_bbox_head[i](predicted_class_features) + _inverse_sigmoid(reference_points))
original_class_projected = self.decoder_class_head[i](class_features).permute(1, 2, 0)
if self.training:
decoder_classes.append(get_class_similarity(class_distance_type=self.class_distance_type, cls_feature=predicted_class_features, class_proj=original_class_projected))
if i == 0:
decoder_bboxes.append(refined_bbox)
else:
decoder_bboxes.append(torch.sigmoid(self.decoder_bbox_head[i](predicted_class_features) + _inverse_sigmoid(last_refined_bbox)))
elif i == self.decoder_num_layers - 1:
decoder_classes.append(get_class_similarity(self.class_distance_type, predicted_class_features, original_class_projected))
decoder_bboxes.append(refined_bbox)
break
last_refined_bbox = refined_bbox
reference_points = refined_bbox.detach() if self.training else refined_bbox
if output_attentions:
all_attns += (all_self_attns, all_cross_attns)
last_hidden_state = predicted_class_features
decoder_bboxes = torch.stack(decoder_bboxes)
decoder_classes = torch.stack(decoder_classes)
if not return_dict:
return (last_hidden_state, all_hidden_states, all_attns, decoder_bboxes, decoder_classes, encoder_bboxes, encoder_class_similarity, init_reference_points, reference_points)
return OmDetTurboDecoderOutput(last_hidden_state=last_hidden_state, hidden_states=all_hidden_states, attentions=all_attns, decoder_coords=decoder_bboxes, decoder_classes=decoder_classes, encoder_coord_logits=encoder_bboxes, encoder_class_logits=encoder_class_similarity, init_reference_points=init_reference_points, intermediate_reference_points=reference_points)
|
class OmDetTurboDecoder(OmDetTurboPreTrainedModel):
def __init__(self, config: OmDetTurboConfig):
pass
@lru_cache(maxsize=32)
def generate_anchors(self, spatial_shapes=None, grid_size=0.05, device='cpu', dtype=torch.float32):
pass
def _get_encoder_input(self, vision_features):
pass
def _get_decoder_input(self, vision_features, vision_shapes, class_features, denoise_embeddings=None, denoise_bboxes=None):
pass
def forward(self, vision_features, class_features, task_features, task_mask, output_attentions=None, output_hidden_states=None, return_dict=None):
'''
Args:
vision_features (`torch.FloatTensor`): The sequence of vision features; shape depends on the vision
backbone.
class_features (`torch.FloatTensor`): The sequence of class features of shape
`(class_sequence_length, batch_size, class_embed_dim)`.
task_features (`torch.FloatTensor`): The sequence of task features of shape
`(task_sequence_length, batch_size, decoder_hidden_dim)`.
task_mask (`torch.LongTensor`): The mask for the task features of shape `(batch_size, task_sequence_length)`.
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention
layers. See `attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See
`hidden_states` under returned tensors for more detail.
return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain
tuple.
'''
pass
| 7
| 1
| 62
| 5
| 49
| 8
| 7
| 0.16
| 1
| 11
| 5
| 0
| 5
| 17
| 5
| 11
| 317
| 29
| 249
| 82
| 231
| 39
| 134
| 69
| 128
| 20
| 2
| 3
| 34
|
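The anchor generation above can be exercised in isolation; the sketch below reproduces its logic for a tiny two-level pyramid (level shapes chosen arbitrarily): per-level grid centers, widths/heights doubling per level, an inverse-sigmoid transform, and a validity mask that pushes near-border anchors to `inf`.

```python
import torch

spatial_shapes, grid_size, eps = ((4, 4), (2, 2)), 0.05, 0.01
anchors = []
for level, (height, width) in enumerate(spatial_shapes):
    grid_y, grid_x = torch.meshgrid(torch.arange(height, dtype=torch.float32),
                                    torch.arange(width, dtype=torch.float32), indexing="ij")
    grid_xy = (torch.stack([grid_x, grid_y], -1).unsqueeze(0) + 0.5) / torch.tensor([width, height])
    wh = torch.ones_like(grid_xy) * grid_size * 2.0 ** level  # boxes double in size per level
    anchors.append(torch.concat([grid_xy, wh], -1).reshape(-1, height * width, 4))

anchors = torch.concat(anchors, 1)  # (1, 16 + 4, 4) in cxcywh, normalized to [0, 1]
valid_mask = ((anchors > eps) * (anchors < 1 - eps)).all(-1, keepdim=True)
anchors = torch.log(anchors / (1 - anchors))        # inverse sigmoid, so heads predict offsets in logit space
anchors = torch.where(valid_mask, anchors, torch.inf)
print(anchors.shape, int(valid_mask.sum()))  # torch.Size([1, 20, 4]) 20
```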
4,241
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboDeformableTransformerDecoderLayer
|
import torch.nn.functional as F
import torch
from ...activations import ACT2CLS, ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from torch import Tensor, nn
class OmDetTurboDeformableTransformerDecoderLayer(GradientCheckpointingLayer):
"""
A single layer of the Deformable Transformer Decoder.
"""
def __init__(self, config):
super().__init__()
self.self_attn = OmDetTurboMultiheadAttention(config, hidden_size=config.decoder_hidden_dim, num_attention_heads=config.decoder_num_heads, dropout=config.decoder_dropout)
self.dropout1 = nn.Dropout(config.decoder_dropout)
self.norm1 = nn.LayerNorm(config.decoder_hidden_dim, eps=config.layer_norm_eps)
self.cross_attn = OmDetTurboMultiscaleDeformableAttention(config, num_heads=config.decoder_num_heads, n_points=config.decoder_num_points)
self.dropout2 = nn.Dropout(config.decoder_dropout)
self.norm2 = nn.LayerNorm(config.decoder_hidden_dim, eps=config.layer_norm_eps)
self.linear1 = nn.Linear(config.decoder_hidden_dim, config.decoder_dim_feedforward)
self.act = ACT2FN[config.decoder_activation]
self.dropout3 = nn.Dropout(config.decoder_dropout)
self.linear2 = nn.Linear(config.decoder_dim_feedforward, config.decoder_hidden_dim)
self.dropout4 = nn.Dropout(config.decoder_dropout)
self.norm3 = nn.LayerNorm(config.decoder_hidden_dim, eps=config.layer_norm_eps)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward(self, decoder_embeddings, task_features, reference_points, vision_features, vision_shapes, vision_shapes_list, level_start_index=None, attention_mask=None, padding_mask=None, query_position=None, output_attentions=None, output_hidden_states=None):
output_attentions = output_attentions if output_attentions is not None else self.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.output_hidden_states
origin_embedding_len = decoder_embeddings.shape[1]
query = key = self.with_pos_embed(decoder_embeddings, query_position)
task_features = task_features.transpose(0, 1)
query = torch.cat((query, task_features), dim=1)
key = torch.cat((key, task_features), dim=1)
decoder_embeddings = torch.cat((decoder_embeddings, task_features), dim=1)
outputs = self.self_attn(query, key, decoder_embeddings, attention_mask=attention_mask, output_attentions=output_attentions)
context, self_attention = outputs if output_attentions else (outputs[0], None)
decoder_embeddings = decoder_embeddings + self.dropout1(context)
decoder_embeddings = self.norm1(decoder_embeddings)
task_features = decoder_embeddings[:, origin_embedding_len:, :].transpose(0, 1)
decoder_embeddings = decoder_embeddings[:, :origin_embedding_len, :]
hidden_states = self.with_pos_embed(decoder_embeddings, query_position)
reference_points = reference_points.unsqueeze(2)
outputs, cross_attention = self.cross_attn(hidden_states=hidden_states, attention_mask=padding_mask, encoder_hidden_states=vision_features, reference_points=reference_points, spatial_shapes=vision_shapes, spatial_shapes_list=vision_shapes_list, level_start_index=level_start_index)
decoder_embeddings = decoder_embeddings + self.dropout2(outputs)
residual = self.norm2(decoder_embeddings)
decoder_embeddings = self.linear2(self.dropout3(self.act(self.linear1(residual))))
decoder_embeddings = residual + self.dropout4(decoder_embeddings)
decoder_embeddings = self.norm3(decoder_embeddings)
return (decoder_embeddings, task_features, self_attention if output_attentions else None, cross_attention if output_attentions else None)
|
class OmDetTurboDeformableTransformerDecoderLayer(GradientCheckpointingLayer):
'''
A single layer of the Deformable Transformer Decoder.
'''
def __init__(self, config):
pass
@staticmethod
def with_pos_embed(tensor, pos):
pass
def forward(self, decoder_embeddings, task_features, reference_points, vision_features, vision_shapes, vision_shapes_list, level_start_index=None, attention_mask=None, padding_mask=None, query_position=None, output_attentions=None, output_hidden_states=None):
pass
| 5
| 1
| 33
| 3
| 27
| 2
| 3
| 0.12
| 1
| 3
| 2
| 0
| 2
| 14
| 3
| 13
| 107
| 13
| 84
| 40
| 65
| 10
| 43
| 25
| 39
| 6
| 1
| 0
| 9
|
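The fusion step in the forward pass above concatenates task tokens onto the decoder queries before self-attention, then splits them back off afterwards; a minimal sketch with `nn.MultiheadAttention` standing in for `OmDetTurboMultiheadAttention` (toy shapes):

```python
import torch
from torch import nn

batch, num_queries, num_task_tokens, dim = 2, 6, 3, 32
decoder_embeddings = torch.randn(batch, num_queries, dim)
task_features = torch.randn(num_task_tokens, batch, dim)  # (seq, batch, dim), as in the layer above

attn = nn.MultiheadAttention(dim, num_heads=4, batch_first=True)
fused = torch.cat((decoder_embeddings, task_features.transpose(0, 1)), dim=1)
context, _ = attn(fused, fused, fused)

decoder_embeddings = context[:, :num_queries, :]             # updated queries
task_features = context[:, num_queries:, :].transpose(0, 1)  # task tokens, back to (seq, batch, dim)
print(decoder_embeddings.shape, task_features.shape)
```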
4,242
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboEncoder
|
import torch
from typing import Optional, Union
from .configuration_omdet_turbo import OmDetTurboConfig
from torch import Tensor, nn
import torch.nn.functional as F
class OmDetTurboEncoder(nn.Module):
def __init__(self, config: OmDetTurboConfig):
super().__init__()
self.layers = nn.ModuleList([OmDetTurboEncoderLayer(config) for _ in range(config.encoder_layers)])
def forward(self, src, src_mask=None, pos_embed=None, output_attentions: bool=False) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]]]:
hidden_states = src
attention = () if output_attentions else None
for layer in self.layers:
hidden_states = layer(hidden_states, attention_mask=src_mask, position_embeddings=pos_embed, output_attentions=output_attentions)
if output_attentions:
attention = attention + (hidden_states[1],)
hidden_states = hidden_states[0]
return (hidden_states, attention)
|
class OmDetTurboEncoder(nn.Module):
def __init__(self, config: OmDetTurboConfig):
pass
def forward(self, src, src_mask=None, pos_embed=None, output_attentions: bool=False) -> tuple[Union[torch.Tensor, tuple[torch.Tensor]]]:
pass
| 3
| 0
| 11
| 1
| 10
| 0
| 3
| 0
| 1
| 6
| 2
| 0
| 2
| 1
| 2
| 12
| 23
| 3
| 20
| 9
| 15
| 0
| 13
| 7
| 10
| 4
| 1
| 2
| 5
|
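The encoder above is just a layer stack whose members return tuples; a toy sketch of the same accumulation pattern (a softmax self-similarity stands in for the real layer):

```python
import torch
from torch import nn

class ToyLayer(nn.Module):
    def forward(self, hidden_states, output_attentions=False):
        attention = torch.softmax(hidden_states @ hidden_states.transpose(-1, -2), dim=-1)
        out = attention @ hidden_states
        return (out, attention) if output_attentions else (out,)

layers = nn.ModuleList(ToyLayer() for _ in range(2))
hidden_states, attention = torch.randn(1, 4, 8), ()
for layer in layers:
    hidden_states = layer(hidden_states, output_attentions=True)
    attention = attention + (hidden_states[1],)
    hidden_states = hidden_states[0]
print(hidden_states.shape, len(attention))  # torch.Size([1, 4, 8]) 2
```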
4,243
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboEncoderLayer
|
import torch.nn.functional as F
import torch
from typing import Optional, Union
from ...activations import ACT2CLS, ACT2FN
from .configuration_omdet_turbo import OmDetTurboConfig
from torch import Tensor, nn
class OmDetTurboEncoderLayer(nn.Module):
def __init__(self, config: OmDetTurboConfig):
super().__init__()
self.self_attn = OmDetTurboMultiheadAttention(config, hidden_size=config.encoder_hidden_dim, num_attention_heads=config.num_attention_heads, dropout=config.encoder_dropout)
self.self_attn_layer_norm = nn.LayerNorm(config.encoder_hidden_dim, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.encoder_dropout)
self.activation_fn = ACT2FN[config.encoder_feedforward_activation]
self.encoder_feedforward_dropout = nn.Dropout(config.encoder_feedforward_dropout)
self.fc1 = nn.Linear(config.encoder_hidden_dim, config.encoder_dim_feedforward)
self.fc2 = nn.Linear(config.encoder_dim_feedforward, config.encoder_hidden_dim)
self.final_layer_norm = nn.LayerNorm(config.encoder_hidden_dim, eps=config.layer_norm_eps)
@staticmethod
def with_pos_embed(tensor, pos_embed):
return tensor if pos_embed is None else tensor + pos_embed
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
position_embeddings (`torch.FloatTensor`, *optional*):
Object queries (also called content embeddings), to be added to the hidden states.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
query = key = self.with_pos_embed(hidden_states, position_embeddings)
hidden_states = self.self_attn(queries=query, keys=key, values=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states, attentions = hidden_states if output_attentions else (hidden_states[0], None)
hidden_states = self.dropout(hidden_states)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.encoder_feedforward_dropout(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
if output_attentions:
return (hidden_states, attentions)
return (hidden_states,)
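A minimal shape sketch of this layer follows; it assumes the module path below is importable from an installed `transformers` and that the default `OmDetTurboConfig` values are enough to construct the layer on their own (both are assumptions, not verified here).

```python
# Hedged sketch: run one encoder layer on random features and confirm the
# residual blocks preserve the (batch, seq_len, embed_dim) shape.
import torch
from transformers import OmDetTurboConfig
from transformers.models.omdet_turbo.modeling_omdet_turbo import OmDetTurboEncoderLayer

config = OmDetTurboConfig()  # default values, assumed sufficient here
layer = OmDetTurboEncoderLayer(config).eval()

hidden = torch.randn(2, 64, config.encoder_hidden_dim)
with torch.no_grad():
    out = layer(hidden, attention_mask=None, output_attentions=True)
print(out[0].shape)  # unchanged: (2, 64, encoder_hidden_dim)
print(out[1].shape)  # attention probs: (batch, num_heads, seq_len, seq_len)
```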
|
class OmDetTurboEncoderLayer(nn.Module):
def __init__(self, config: OmDetTurboConfig):
pass
@staticmethod
def with_pos_embed(tensor, pos_embed):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False):
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
position_embeddings (`torch.FloatTensor`, *optional*):
Object queries (also called content embeddings), to be added to the hidden states.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 5
| 1
| 22
| 1
| 17
| 4
| 3
| 0.23
| 1
| 5
| 2
| 0
| 2
| 8
| 3
| 13
| 70
| 5
| 53
| 23
| 42
| 12
| 35
| 16
| 31
| 5
| 1
| 2
| 8
|
4,244
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboForObjectDetection
|
import torch
from ...utils import auto_docstring, logging
from typing import Optional, Union
from .configuration_omdet_turbo import OmDetTurboConfig
from torch import Tensor, nn
import torch.nn.functional as F
@auto_docstring(custom_intro='\n OmDetTurbo Model (consisting of a vision and a text backbone, and encoder-decoder architecture) outputting\n bounding boxes and classes scores for tasks such as COCO detection.\n ')
class OmDetTurboForObjectDetection(OmDetTurboPreTrainedModel):
def __init__(self, config: OmDetTurboConfig):
super().__init__(config)
self.vision_backbone = OmDetTurboVisionBackbone(config)
self.language_backbone = OmDetTurboLanguageBackbone(config)
self.encoder = OmDetTurboHybridEncoder(config)
self.decoder = OmDetTurboDecoder(config)
self.num_queries = config.num_queries
self.language_cache_class = OmDetTurboLRUCache(config.cache_size)
self.language_cache_prompt = OmDetTurboLRUCache(config.cache_size)
self.vocab_size = config.text_config.vocab_size
self.post_init()
def get_input_embeddings(self):
return self.language_backbone.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_backbone.model.set_input_embeddings(value)
def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of=None, mean_resizing: bool=True) -> nn.Embedding:
model_embeds = self.language_backbone.model.resize_token_embeddings(new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of, mean_resizing=mean_resizing)
self.config.text_config.vocab_size = model_embeds.num_embeddings
self.vocab_size = model_embeds.num_embeddings
return model_embeds
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, classes_input_ids: torch.LongTensor, classes_attention_mask: torch.LongTensor, tasks_input_ids: torch.LongTensor, tasks_attention_mask: torch.LongTensor, classes_structure: torch.LongTensor, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], OmDetTurboObjectDetectionOutput]:
"""
classes_input_ids (`torch.LongTensor` of shape `(total_classes (>= batch_size), sequence_length)`):
Indices of input classes sequence tokens in the vocabulary of the language model.
Several classes can be provided for each task, thus the tokenized classes are flattened
and the structure of the classes is provided in the `classes_structure` argument.
Indices can be obtained using [`OmDetTurboProcessor`]. See [`OmDetTurboProcessor.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
classes_attention_mask (`torch.BoolTensor` of shape `(total_classes (>= batch_size), num_classes, sequence_length)`):
Attention mask for the classes. This is a binary mask that indicates which tokens should be attended to,
and which should not.
tasks_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input tasks sequence tokens in the vocabulary of the language model.
Indices can be obtained using [`OmDetTurboProcessor`]. See [`OmDetTurboProcessor.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
tasks_attention_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Attention mask for the tasks. This is a binary mask that indicates which tokens should be attended to,
and which should not.
classes_structure (`torch.LongTensor` of shape `(batch_size)`):
Structure of the classes. This tensor indicates the number of classes for each task.
Examples:
```python
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoProcessor, OmDetTurboForObjectDetection
>>> processor = AutoProcessor.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
>>> model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> classes = ["cat", "remote"]
>>> task = "Detect {}.".format(", ".join(classes))
>>> inputs = processor(image, text=classes, task=task, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # convert outputs (bounding boxes and class logits)
>>> results = processor.post_process_grounded_object_detection(
...     outputs,
...     text_labels=classes,
...     target_sizes=[image.size[::-1]],
...     threshold=0.3,
...     nms_threshold=0.3,
... )[0]
>>> for score, class_name, box in zip(results["scores"], results["text_labels"], results["boxes"]):
... box = [round(i, 1) for i in box.tolist()]
... print(
... f"Detected {class_name} with confidence "
... f"{round(score.item(), 2)} at location {box}"
... )
Detected remote with confidence 0.76 at location [39.9, 71.3, 176.5, 117.9]
Detected cat with confidence 0.72 at location [345.1, 22.5, 639.7, 371.9]
Detected cat with confidence 0.65 at location [12.7, 53.8, 315.5, 475.3]
Detected remote with confidence 0.57 at location [333.4, 75.6, 370.7, 187.0]
```"""
if labels is not None:
raise NotImplementedError('Training is not implemented yet')
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
loss = None
image_features = self.vision_backbone(pixel_values)
encoder_outputs = self.encoder(image_features, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
class_features, task_features, task_mask = self.get_language_embedding(classes_input_ids, classes_attention_mask, tasks_input_ids, tasks_attention_mask, classes_structure)
encoder_extracted_states = encoder_outputs.extracted_states if return_dict else encoder_outputs[-1]
decoder_outputs = self.decoder(encoder_extracted_states, class_features, task_features, task_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
if not return_dict:
return tuple((output for output in [loss, decoder_outputs[3][-1], decoder_outputs[4][-1], decoder_outputs[7], decoder_outputs[8], decoder_outputs[5], decoder_outputs[6], encoder_outputs[-1], decoder_outputs[1], decoder_outputs[2], encoder_outputs[1], encoder_outputs[2], classes_structure] if output is not None))
return OmDetTurboObjectDetectionOutput(loss=loss, decoder_coord_logits=decoder_outputs.decoder_coords[-1], decoder_class_logits=decoder_outputs.decoder_classes[-1], init_reference_points=decoder_outputs.init_reference_points, intermediate_reference_points=decoder_outputs.intermediate_reference_points, encoder_coord_logits=decoder_outputs.encoder_coord_logits, encoder_class_logits=decoder_outputs.encoder_class_logits, encoder_extracted_states=encoder_outputs.extracted_states, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, classes_structure=classes_structure)
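Because `resize_token_embeddings` above is routed to the language backbone and `config.text_config.vocab_size` is kept in sync, a short sketch of that path may help. The checkpoint name is taken from the docstring above; the added token is purely illustrative.

```python
# Hedged sketch: extend the tokenizer, then resize the text-backbone embeddings.
from transformers import AutoProcessor, OmDetTurboForObjectDetection

processor = AutoProcessor.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")

processor.tokenizer.add_tokens(["<my_new_token>"])  # illustrative token
embeddings = model.resize_token_embeddings(len(processor.tokenizer))
assert model.vocab_size == embeddings.num_embeddings  # config stays in sync
```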
|
@auto_docstring(custom_intro='\n OmDetTurbo Model (consisting of a vision and a text backbone, and encoder-decoder architecture) outputting\n bounding boxes and classes scores for tasks such as COCO detection.\n ')
class OmDetTurboForObjectDetection(OmDetTurboPreTrainedModel):
def __init__(self, config: OmDetTurboConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of=None, mean_resizing: bool=True) -> nn.Embedding:
pass
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, classes_input_ids: torch.LongTensor, classes_attention_mask: torch.LongTensor, tasks_input_ids: torch.LongTensor, tasks_attention_mask: torch.LongTensor, classes_structure: torch.LongTensor, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], OmDetTurboObjectDetectionOutput]:
'''
classes_input_ids (`torch.LongTensor` of shape `(total_classes (>= batch_size), sequence_length)`):
Indices of input classes sequence tokens in the vocabulary of the language model.
Several classes can be provided for each task, thus the tokenized classes are flattened
and the structure of the classes is provided in the `classes_structure` argument.
Indices can be obtained using [`OmDetTurboProcessor`]. See [`OmDetTurboProcessor.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
classes_attention_mask (`torch.BoolTensor` of shape `(total_classes (>= batch_size), num_classes, sequence_length)`):
Attention mask for the classes. This is a binary mask that indicates which tokens should be attended to,
and which should not.
tasks_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input tasks sequence tokens in the vocabulary of the language model.
Indices can be obtained using [`OmDetTurboProcessor`]. See [`OmDetTurboProcessor.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
tasks_attention_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Attention mask for the tasks. This is a binary mask that indicates which tokens should be attended to,
and which should not.
classes_structure (`torch.LongTensor` of shape `(batch_size)`):
Structure of the classes. This tensor indicates the number of classes for each task.
Examples:
```python
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoProcessor, OmDetTurboForObjectDetection
>>> processor = AutoProcessor.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
>>> model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> classes = ["cat", "remote"]
>>> task = "Detect {}.".format(", ".join(classes))
>>> inputs = processor(image, text=classes, task=task, return_tensors="pt")
>>> outputs = model(**inputs)
>>> # convert outputs (bounding boxes and class logits)
>>> results = processor.post_process_grounded_object_detection(
...     outputs,
...     text_labels=classes,
...     target_sizes=[image.size[::-1]],
...     threshold=0.3,
...     nms_threshold=0.3,
... )[0]
>>> for score, class_name, box in zip(results["scores"], results["text_labels"], results["boxes"]):
... box = [round(i, 1) for i in box.tolist()]
... print(
... f"Detected {class_name} with confidence "
... f"{round(score.item(), 2)} at location {box}"
... )
Detected remote with confidence 0.76 at location [39.9, 71.3, 176.5, 117.9]
Detected cat with confidence 0.72 at location [345.1, 22.5, 639.7, 371.9]
Detected cat with confidence 0.65 at location [12.7, 53.8, 315.5, 475.3]
Detected remote with confidence 0.57 at location [333.4, 75.6, 370.7, 187.0]
```'''
pass
| 8
| 1
| 30
| 2
| 21
| 7
| 2
| 0.32
| 1
| 12
| 7
| 0
| 5
| 8
| 5
| 11
| 157
| 16
| 107
| 36
| 85
| 34
| 36
| 21
| 30
| 7
| 2
| 1
| 11
|
4,245
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboHybridEncoder
|
from .configuration_omdet_turbo import OmDetTurboConfig
import torch
import torch.nn.functional as F
from torch import Tensor, nn
class OmDetTurboHybridEncoder(nn.Module):
"""
Encoder consisting of channel projection layers, a set of `OmDetTurboEncoder`, a top-down Feature Pyramid Network
(FPN) and a bottom-up Path Aggregation Network (PAN). More details in the paper: https://huggingface.co/papers/2304.08069
Args:
config: OmDetTurboConfig
"""
def __init__(self, config: OmDetTurboConfig):
super().__init__()
self.config = config
self.in_channels = config.encoder_in_channels
self.encoder_hidden_dim = config.encoder_hidden_dim
self.encoder_projection_indices = config.encoder_projection_indices
self.positional_encoding_temperature = config.positional_encoding_temperature
self.eval_size = config.eval_size
self.out_channels = [self.encoder_hidden_dim for _ in self.in_channels]
self.channel_projection_layers = nn.ModuleList()
for in_channel in self.in_channels:
self.channel_projection_layers.append(nn.Sequential(nn.Conv2d(in_channel, self.encoder_hidden_dim, kernel_size=(1, 1), bias=False), nn.BatchNorm2d(self.encoder_hidden_dim)))
self.encoder = nn.ModuleList([OmDetTurboEncoder(config) for _ in range(len(self.encoder_projection_indices))])
self.lateral_convs = nn.ModuleList()
self.fpn_blocks = nn.ModuleList()
for _ in range(len(self.in_channels) - 1, 0, -1):
self.lateral_convs.append(OmDetTurboConvNormLayer(config, in_channels=self.encoder_hidden_dim, out_channels=self.encoder_hidden_dim, kernel_size=1, stride=1, activation=config.conv_norm_activation))
self.fpn_blocks.append(OmDetTurboCSPRepLayer(config))
self.downsample_convs = nn.ModuleList()
self.pan_blocks = nn.ModuleList()
for _ in range(len(self.in_channels) - 1):
self.downsample_convs.append(OmDetTurboConvNormLayer(config, in_channels=self.encoder_hidden_dim, out_channels=self.encoder_hidden_dim, kernel_size=3, stride=2, activation=config.conv_norm_activation))
self.pan_blocks.append(OmDetTurboCSPRepLayer(config))
@staticmethod
def build_2d_sincos_position_embedding(width, height, embed_dim=256, temperature=10000.0, device='cpu', dtype=torch.float32):
grid_w = torch.arange(int(width), dtype=dtype, device=device)
grid_h = torch.arange(int(height), dtype=dtype, device=device)
grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing='ij')
if embed_dim % 4 != 0:
raise ValueError('Embed dimension must be divisible by 4 for 2D sin-cos position embedding')
pos_dim = embed_dim // 4
omega = torch.arange(pos_dim, dtype=dtype, device=device) / pos_dim
omega = 1.0 / temperature ** omega
out_w = grid_w.flatten()[..., None] @ omega[None]
out_h = grid_h.flatten()[..., None] @ omega[None]
return torch.concat([out_w.sin(), out_w.cos(), out_h.sin(), out_h.cos()], dim=1)[None, :, :]
def forward(self, inputs_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None):
"""
Args:
inputs_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layers) that is passed to the encoder.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeddings
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
projected_features = [self.channel_projection_layers[i](feature) for i, feature in enumerate(hidden_states)]
for encoder_layer_index, feature_to_project_index in enumerate(self.encoder_projection_indices):
if output_hidden_states:
encoder_states = encoder_states + (projected_features[feature_to_project_index],)
height, width = projected_features[feature_to_project_index].shape[2:]
src_flatten = projected_features[feature_to_project_index].flatten(2).permute(0, 2, 1)
if self.training or self.eval_size is None:
pos_embed = self.build_2d_sincos_position_embedding(width, height, self.encoder_hidden_dim, self.positional_encoding_temperature, device=src_flatten.device, dtype=src_flatten.dtype).to(src_flatten.device, src_flatten.dtype)
else:
pos_embed = None
layer_outputs = self.encoder[encoder_layer_index](src_flatten, pos_embed=pos_embed, output_attentions=output_attentions)
projected_features[feature_to_project_index] = layer_outputs[0].permute(0, 2, 1).reshape(-1, self.encoder_hidden_dim, height, width).contiguous()
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (projected_features[feature_to_project_index],)
fpn_feature_maps = [projected_features[-1]]
for idx in range(len(self.in_channels) - 1, 0, -1):
feat_high = fpn_feature_maps[0]
feat_low = projected_features[idx - 1]
feat_high = self.lateral_convs[len(self.in_channels) - 1 - idx](feat_high)
fpn_feature_maps[0] = feat_high
upsample_feat = F.interpolate(feat_high, scale_factor=2.0, mode='nearest')
fps_map = self.fpn_blocks[len(self.in_channels) - 1 - idx](torch.concat([upsample_feat, feat_low], dim=1))
fpn_feature_maps.insert(0, fps_map)
fpn_states = [fpn_feature_maps[0]]
for idx in range(len(self.in_channels) - 1):
feat_low = fpn_states[-1]
feat_high = fpn_feature_maps[idx + 1]
downsample_feat = self.downsample_convs[idx](feat_low)
hidden_states = self.pan_blocks[idx](torch.concat([downsample_feat, feat_high.to(downsample_feat.device)], dim=1))
fpn_states.append(hidden_states)
if not return_dict:
return (fpn_states[-1], encoder_states, all_attentions, fpn_states)
return OmDetTurboEncoderOutput(last_hidden_state=fpn_states[-1], hidden_states=encoder_states, attentions=all_attentions, extracted_states=fpn_states)
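The static sin-cos helper can be exercised on its own, which makes its output layout easy to see: one embedding per spatial location. A small sketch (module path assumed importable):

```python
# Hedged sketch: 2D sin-cos position embeddings for a 20x20 feature map.
import torch
from transformers.models.omdet_turbo.modeling_omdet_turbo import OmDetTurboHybridEncoder

pos = OmDetTurboHybridEncoder.build_2d_sincos_position_embedding(width=20, height=20, embed_dim=256)
print(pos.shape)  # torch.Size([1, 400, 256]): (1, width * height, embed_dim)
```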
|
class OmDetTurboHybridEncoder(nn.Module):
'''
Encoder consisting of channel projection layers, a set of `OmDetTurboEncoder`, a top-down Feature Pyramid Network
(FPN) and a bottom-up Path Aggregation Network (PAN). More details in the paper: https://huggingface.co/papers/2304.08069
Args:
config: OmDetTurboConfig
'''
def __init__(self, config: OmDetTurboConfig):
pass
@staticmethod
def build_2d_sincos_position_embedding(width, height, embed_dim=256, temperature=10000.0, device='cpu', dtype=torch.float32):
pass
def forward(self, inputs_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None):
'''
Args:
inputs_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layers) that is passed to the encoder.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 5
| 2
| 54
| 4
| 43
| 7
| 7
| 0.2
| 1
| 10
| 5
| 0
| 2
| 13
| 3
| 13
| 174
| 15
| 132
| 50
| 119
| 27
| 77
| 41
| 73
| 14
| 1
| 2
| 20
|
4,246
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboLRUCache
|
from collections import OrderedDict
class OmDetTurboLRUCache:
def __init__(self, capacity: int):
self.cache = OrderedDict()
self.capacity = capacity
self.current_load = 0
def has(self, key) -> bool:
return key in self.cache
def get(self, key):
"""
Get the value of the key if the key exists in the cache, otherwise return None.
Move the key to the end of the cache to show that it was recently used.
"""
if key not in self.cache:
return None
self.cache.move_to_end(key)
return self.cache[key]
def put(self, key, value) -> None:
"""
Add the key-value pair to the cache.
Move the key to the end of the cache to show that it was recently used.
If the cache is full, remove the first key (least recently used).
"""
if key not in self.cache:
self.current_load += 1
if self.current_load > self.capacity:
self.cache.popitem(last=False)
self.current_load -= 1
self.cache[key] = value
self.cache.move_to_end(key)
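A tiny usage sketch of the eviction behavior (it assumes the `OmDetTurboLRUCache` class above is in scope, e.g. imported from the module path shown):

```python
cache = OmDetTurboLRUCache(capacity=2)
cache.put("a", 1)
cache.put("b", 2)
cache.get("a")      # touching "a" makes it most recently used
cache.put("c", 3)   # over capacity: evicts "b", the least recently used key
print(cache.has("b"), cache.has("a"), cache.has("c"))  # False True True
```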
|
class OmDetTurboLRUCache:
def __init__(self, capacity: int):
pass
def has(self, key) -> bool:
pass
def get(self, key):
'''
Get the value of the key if the key exists in the cache, otherwise return None.
Move the key to the end of the cache to show that it was recently used.
'''
pass
def put(self, key, value) -> None:
'''
Add the key-value pair to the cache.
Move the key to the end of the cache to show that it was recently used.
If the cache is full, remove the first key (least recently used).
'''
pass
| 5
| 2
| 7
| 0
| 5
| 2
| 2
| 0.45
| 0
| 3
| 0
| 0
| 4
| 3
| 4
| 4
| 33
| 4
| 20
| 8
| 15
| 9
| 20
| 8
| 15
| 3
| 0
| 2
| 7
|
4,247
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboLanguageBackbone
|
import torch
from .configuration_omdet_turbo import OmDetTurboConfig
from torch import Tensor, nn
from ..auto import AutoModel
import torch.nn.functional as F
class OmDetTurboLanguageBackbone(nn.Module):
def __init__(self, config: OmDetTurboConfig):
super().__init__()
self.model = AutoModel.from_config(config.text_config)
self.text_projection = nn.Parameter(torch.zeros(config.text_projection_in_dim, config.text_projection_out_dim))
def forward(self, hidden_states, mask=None, encode_type='task'):
text_outputs = self.model(hidden_states)
pooled_output = text_outputs[0]
if encode_type == 'task':
if mask is None:
raise ValueError('mask is required for task encoding')
max_len = (mask != 0).sum(1).max().item()
truncated_mask = mask[:, :max_len]
truncated_output = pooled_output[:, :max_len, :]
return (truncated_output.transpose(0, 1), truncated_mask)
elif encode_type == 'class':
max_pooled_output = pooled_output[torch.arange(pooled_output.shape[0]), hidden_states.argmax(dim=-1)]
projected_output = max_pooled_output @ self.text_projection
return projected_output
else:
raise ValueError(f'encode_type {encode_type} is not supported')
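The `encode_type='class'` branch pools, for each sequence, the hidden state at the position of the highest token id (CLIP-style EOS pooling). A toy sketch of just that indexing (tensors are illustrative):

```python
import torch

pooled_output = torch.randn(3, 7, 16)           # (batch, seq_len, hidden)
hidden_states = torch.randint(0, 100, (3, 7))   # toy token ids
eos_states = pooled_output[torch.arange(pooled_output.shape[0]), hidden_states.argmax(dim=-1)]
print(eos_states.shape)  # torch.Size([3, 16]) -- one pooled vector per sequence
```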
|
class OmDetTurboLanguageBackbone(nn.Module):
def __init__(self, config: OmDetTurboConfig):
pass
def forward(self, hidden_states, mask=None, encode_type='task'):
pass
| 3
| 0
| 10
| 0
| 10
| 0
| 3
| 0
| 1
| 4
| 2
| 0
| 2
| 2
| 2
| 12
| 22
| 1
| 21
| 12
| 18
| 0
| 19
| 12
| 16
| 4
| 1
| 2
| 5
|
4,248
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboMLP
|
from torch import Tensor, nn
import torch.nn.functional as F
class OmDetTurboMLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
hidden_layers_dims = [hidden_dim] * (num_layers - 1)
layers_dims = [input_dim] + hidden_layers_dims + [output_dim]
self.layers = nn.ModuleList([nn.Linear(in_dim, out_dim) for in_dim, out_dim in zip(layers_dims[:-1], layers_dims[1:])])
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
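A quick shape check (assumes the `OmDetTurboMLP` class above is in scope):

```python
import torch

mlp = OmDetTurboMLP(input_dim=256, hidden_dim=512, output_dim=4, num_layers=3)
x = torch.randn(2, 100, 256)
print(mlp(x).shape)  # torch.Size([2, 100, 4]); ReLU on every layer but the last
```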
|
class OmDetTurboMLP(nn.Module):
'''Very simple multi-layer perceptron (also called FFN)'''
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
pass
def forward(self, x):
pass
| 3
| 1
| 6
| 0
| 6
| 0
| 2
| 0.08
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 16
| 2
| 13
| 8
| 10
| 1
| 11
| 8
| 8
| 3
| 1
| 1
| 4
|
4,249
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboMLPWithDropout
|
from ...activations import ACT2CLS, ACT2FN
from torch import Tensor, nn
class OmDetTurboMLPWithDropout(nn.Module):
def __init__(self, config):
super().__init__()
self.linear1 = nn.Linear(config.class_embed_dim, config.task_encoder_hidden_dim)
self.activation = ACT2FN[config.decoder_activation]
self.dropout = nn.Dropout(config.decoder_dropout)
self.linear2 = nn.Linear(config.task_encoder_hidden_dim, config.class_embed_dim)
def forward(self, x):
return self.linear2(self.dropout(self.activation(self.linear1(x))))
|
class OmDetTurboMLPWithDropout(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 4
| 2
| 12
| 10
| 1
| 9
| 7
| 6
| 0
| 9
| 7
| 6
| 1
| 1
| 0
| 2
|
4,250
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboMultiheadAttention
|
import torch.nn.functional as F
import torch
import math
from typing import Optional, Union
from torch import Tensor, nn
class OmDetTurboMultiheadAttention(nn.Module):
"""Equivalent implementation of nn.MultiheadAttention with `batch_first=True`."""
def __init__(self, config, hidden_size, num_attention_heads, dropout):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(f'The hidden size ({hidden_size}) is not a multiple of the number of attention heads ({num_attention_heads})')
self.num_attention_heads = num_attention_heads
self.attention_head_size = int(hidden_size / num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(hidden_size, self.all_head_size)
self.key = nn.Linear(hidden_size, self.all_head_size)
self.value = nn.Linear(hidden_size, self.all_head_size)
self.out_proj = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(self, queries: torch.Tensor, keys: torch.Tensor, values: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
batch_size, seq_length, _ = queries.shape
query_layer = self.query(queries).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = self.key(keys).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(values).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
context_layer = self.out_proj(context_layer)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
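Note that the constructor shown above never reads `config`, so a placeholder can be passed when sketching shapes; that is an observation about this snippet, not a documented API guarantee (assumes the class above is in scope):

```python
import torch

attn = OmDetTurboMultiheadAttention(config=None, hidden_size=64, num_attention_heads=8, dropout=0.0)
q = k = v = torch.randn(2, 10, 64)
context, probs = attn(q, k, v, output_attentions=True)
print(context.shape)  # torch.Size([2, 10, 64])
print(probs.shape)    # torch.Size([2, 8, 10, 10])
```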
|
class OmDetTurboMultiheadAttention(nn.Module):
'''Equivalent implementation of nn.MultiheadAttention with `batch_first=True`.'''
def __init__(self, config, hidden_size, num_attention_heads, dropout):
pass
def forward(self, queries: torch.Tensor, keys: torch.Tensor, values: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 3
| 1
| 19
| 3
| 14
| 1
| 2
| 0.11
| 1
| 5
| 0
| 0
| 3
| 8
| 3
| 13
| 61
| 12
| 44
| 28
| 33
| 5
| 34
| 21
| 30
| 3
| 1
| 1
| 6
|
4,251
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboMultiscaleDeformableAttention
|
import torch.nn.functional as F
import torch
import warnings
from typing import Optional, Union
from .configuration_omdet_turbo import OmDetTurboConfig
from torch import Tensor, nn
class OmDetTurboMultiscaleDeformableAttention(nn.Module):
"""
Multiscale deformable attention as proposed in Deformable DETR.
"""
def __init__(self, config: OmDetTurboConfig, num_heads: int, n_points: int):
super().__init__()
self.attn = MultiScaleDeformableAttention()
if config.d_model % num_heads != 0:
raise ValueError(f'embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}')
dim_per_head = config.d_model // num_heads
if not (dim_per_head & dim_per_head - 1 == 0 and dim_per_head != 0):
warnings.warn("You'd better set embed_dim (d_model) in OmDetTurboMultiscaleDeformableAttention to make the dimension of each attention head a power of 2 which is more efficient in the authors' CUDA implementation.")
self.im2col_step = 64
self.d_model = config.d_model
self.n_levels = config.num_feature_levels
self.n_heads = num_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2)
self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points)
self.value_proj = nn.Linear(config.d_model, config.d_model)
self.output_proj = nn.Linear(config.d_model, config.d_model)
self.disable_custom_kernels = config.disable_custom_kernels
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False):
if position_embeddings is not None:
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
batch_size, num_queries, _ = hidden_states.shape
batch_size, sequence_length, _ = encoder_hidden_states.shape
total_elements = sum([shape[0] * shape[1] for shape in spatial_shapes_list])
if total_elements != sequence_length:
raise ValueError('Make sure to align the spatial shapes with the sequence length of the encoder hidden states')
value = self.value_proj(encoder_hidden_states)
if attention_mask is not None:
value = value.masked_fill(~attention_mask[..., None], float(0))
value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points)
num_coordinates = reference_points.shape[-1]
if num_coordinates == 2:
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif num_coordinates == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(f'Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}')
output = self.attn(value, spatial_shapes, spatial_shapes_list, level_start_index, sampling_locations, attention_weights, self.im2col_step)
output = self.output_proj(output)
return (output, attention_weights)
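For 2-d reference points the pixel-space sampling offsets are normalized per feature level; note the `(width, height)` swap when the normalizer is built from `(height, width)` spatial shapes. A toy sketch of just that step:

```python
import torch

spatial_shapes = torch.tensor([[32, 64], [16, 32]])  # (num_levels, 2) as (h, w)
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
print(offset_normalizer)  # tensor([[64, 32], [32, 16]]) -- (w, h) per level
```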
|
class OmDetTurboMultiscaleDeformableAttention(nn.Module):
'''
Multiscale deformable attention as proposed in Deformable DETR.
'''
def __init__(self, config: OmDetTurboConfig, num_heads: int, n_points: int):
pass
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False):
pass
| 4
| 1
| 39
| 3
| 33
| 3
| 5
| 0.11
| 1
| 9
| 2
| 0
| 3
| 10
| 3
| 13
| 125
| 13
| 101
| 39
| 85
| 11
| 55
| 26
| 51
| 8
| 1
| 2
| 15
|
4,252
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboObjectDetectionOutput
|
import torch
from ...file_utils import ModelOutput
from ...utils import auto_docstring, logging
from typing import Optional, Union
from dataclasses import dataclass
import torch.nn.functional as F
@dataclass
@auto_docstring(custom_intro='\n Output type of [`OmDetTurboObjectDetectionOutput`].\n ')
class OmDetTurboObjectDetectionOutput(ModelOutput):
"""
loss (`torch.FloatTensor`):
The loss value.
decoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
The predicted coordinates logits of the objects.
decoder_class_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes)`):
The predicted class of the objects.
init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
The initial reference points.
intermediate_reference_points (`tuple[tuple[torch.FloatTensor]]`):
The intermediate reference points.
encoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
The predicted coordinates of the objects from the encoder.
encoder_class_logits (`tuple[torch.FloatTensor]`):
The predicted class of the objects from the encoder.
encoder_extracted_states (`torch.FloatTensor`):
The extracted states from the Feature Pyramid Network (FPN) and Path Aggregation Network (PAN) of the encoder.
decoder_hidden_states (`tuple[torch.FloatTensor]`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
decoder_attentions (`tuple[tuple[torch.FloatTensor]]`, *optional*):
Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention, cross-attention and multi-scale deformable attention heads.
encoder_hidden_states (`tuple[torch.FloatTensor]`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
encoder_attentions (`tuple[tuple[torch.FloatTensor]]`, *optional*):
Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention, cross-attention and multi-scale deformable attention heads.
classes_structure (`torch.LongTensor`, *optional*):
The number of queried classes for each image.
"""
loss: Optional[torch.FloatTensor] = None
decoder_coord_logits: Optional[torch.FloatTensor] = None
decoder_class_logits: Optional[torch.FloatTensor] = None
init_reference_points: Optional[torch.FloatTensor] = None
intermediate_reference_points: Optional[tuple[tuple[torch.FloatTensor]]] = None
encoder_coord_logits: Optional[torch.FloatTensor] = None
encoder_class_logits: Optional[tuple[torch.FloatTensor]] = None
encoder_extracted_states: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
classes_structure: Optional[torch.LongTensor] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`OmDetTurboObjectDetectionOutput`].\n ')
class OmDetTurboObjectDetectionOutput(ModelOutput):
'''
loss (`torch.FloatTensor`):
The loss value.
decoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
The predicted coordinates logits of the objects.
decoder_class_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes)`):
The predicted class of the objects.
init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
The initial reference points.
intermediate_reference_points (`tuple[tuple[torch.FloatTensor]]`):
The intermediate reference points.
encoder_coord_logits (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
The predicted coordinates of the objects from the encoder.
encoder_class_logits (`tuple[torch.FloatTensor]`):
The predicted class of the objects from the encoder.
encoder_extracted_states (`torch.FloatTensor`):
The extracted states from the Feature Pyramid Network (FPN) and Path Aggregation Network (PAN) of the encoder.
decoder_hidden_states (`tuple[torch.FloatTensor]`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
decoder_attentions (`tuple[tuple[torch.FloatTensor]]`, *optional*):
Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention, cross-attention and multi-scale deformable attention heads.
encoder_hidden_states (`tuple[torch.FloatTensor]`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
encoder_attentions (`tuple[tuple[torch.FloatTensor]]`, *optional*):
Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads,
sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention, cross-attention and multi-scale deformable attention heads.
classes_structure (`torch.LongTensor`, *optional*):
The number of queried classes for each image.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 2.71
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 54
| 2
| 14
| 14
| 13
| 38
| 14
| 14
| 13
| 0
| 1
| 0
| 0
|
4,253
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboPreTrainedModel
|
import torch
from ...utils import auto_docstring, logging
import math
from ...modeling_utils import PreTrainedModel
from .configuration_omdet_turbo import OmDetTurboConfig
from torch import Tensor, nn
import torch.nn.functional as F
@auto_docstring
class OmDetTurboPreTrainedModel(PreTrainedModel):
config: OmDetTurboConfig
base_model_prefix = 'model'
main_input_name = 'pixel_values'
def _init_weights(self, module):
def linear_init_(module_to_init):
bound = 1 / math.sqrt(module_to_init.weight.shape[0])
nn.init.uniform_(module_to_init.weight, -bound, bound)
if hasattr(module_to_init, 'bias') and module_to_init.bias is not None:
nn.init.uniform_(module_to_init.bias, -bound, bound)
if isinstance(module, OmDetTurboEncoderLayer):
linear_init_(module.fc1)
linear_init_(module.fc2)
elif isinstance(module, OmDetTurboDecoder):
nn.init.constant_(module.encoder_bbox_head.layers[-1].weight, 0.0)
nn.init.constant_(module.encoder_bbox_head.layers[-1].bias, 0.0)
for mlp in module.decoder_bbox_head:
nn.init.constant_(mlp.layers[-1].weight, 0.0)
nn.init.constant_(mlp.layers[-1].bias, 0.0)
linear_init_(module.encoder_vision_features[0])
nn.init.xavier_uniform_(module.encoder_vision_features[0].weight)
if module.learn_initial_query:
nn.init.xavier_uniform_(module.tgt_embed.weight)
nn.init.xavier_uniform_(module.query_position_head.layers[0].weight)
nn.init.xavier_uniform_(module.query_position_head.layers[1].weight)
for layer in module.channel_projection_layers:
nn.init.xavier_uniform_(layer[0].weight)
elif isinstance(module, OmDetTurboLanguageBackbone):
nn.init.normal_(module.text_projection, std=self.config.text_projection_in_dim ** (-0.5))
elif isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.init_std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, OmDetTurboDecoder):
module.gradient_checkpointing = value
@staticmethod
def _get_cache_key_at_index(input_ids, attention_mask, index):
input_ids = input_ids[index]
input_mask = attention_mask[index]
cache_key = tuple(input_ids[input_mask != 0].tolist())
return cache_key
def get_cached_class_embeddings(self, classes_input_ids, classes_attention_mask):
not_cached_index = []
not_cached_classes = []
total_embeddings = []
for idx, _ in enumerate(classes_input_ids):
cache_key = self._get_cache_key_at_index(classes_input_ids, classes_attention_mask, idx)
if self.language_cache_class.has(cache_key):
total_embeddings.append(self.language_cache_class.get(cache_key))
else:
total_embeddings.append(None)
not_cached_index.append(idx)
not_cached_classes.append(cache_key)
if not_cached_classes:
not_cached_classes_ids = torch.stack([classes_input_ids[idx] for idx in not_cached_index])
embeddings = self.language_backbone(not_cached_classes_ids, encode_type='class')
for idx, emb in enumerate(embeddings):
idx_to_put = not_cached_index[idx]
total_embeddings[idx_to_put] = emb
self.language_cache_class.put(not_cached_classes[idx], emb)
total_class_embs = torch.stack(total_embeddings).to(self.device)
return total_class_embs
def get_cached_task_embeddings(self, tasks_input_ids, tasks_attention_mask):
not_cached_index = []
not_cached_tasks = []
total_task_features = []
total_task_masks = []
for idx, _ in enumerate(tasks_input_ids):
cache_key = self._get_cache_key_at_index(tasks_input_ids, tasks_attention_mask, idx)
if self.language_cache_prompt.has(cache_key):
task_feature, task_mask = self.language_cache_prompt.get(cache_key)
total_task_features.append(task_feature)
total_task_masks.append(task_mask)
else:
total_task_features.append(None)
total_task_masks.append(None)
not_cached_index.append(idx)
not_cached_tasks.append(cache_key)
if not_cached_tasks:
not_cached_index_ids = torch.stack([tasks_input_ids[idx] for idx in not_cached_index])
not_cached_mask = torch.stack([tasks_attention_mask[idx] for idx in not_cached_index])
embeddings, masks = self.language_backbone(not_cached_index_ids, mask=not_cached_mask, encode_type='task')
for idx in range(embeddings.shape[1]):
emb = embeddings[:, [idx], :]
idx_to_put = not_cached_index[idx]
cur_mask = torch.unsqueeze(masks[idx], dim=0).to(self.device)
total_task_features[idx_to_put] = emb
total_task_masks[idx_to_put] = cur_mask
self.language_cache_prompt.put(not_cached_tasks[idx], (emb, cur_mask))
max_len = max([task.shape[0] for task in total_task_features])
for idx, task in enumerate(total_task_features):
if task.shape[0] < max_len:
pad_size = max_len - task.shape[0]
total_task_features[idx] = F.pad(task, (0, 0, 0, 0, 0, pad_size))
total_task_masks[idx] = F.pad(total_task_masks[idx], (0, pad_size))
total_task_features = torch.cat(total_task_features, dim=1).to(self.device)
total_task_masks = torch.cat(total_task_masks, dim=0).to(self.device)
return (total_task_features, total_task_masks)
def get_language_embedding(self, classes_input_ids, classes_attention_mask, tasks_input_ids, tasks_attention_mask, classes_structure):
batched_classes_embeddings = self.get_cached_class_embeddings(classes_input_ids, classes_attention_mask)
max_class_size = torch.max(classes_structure)
class_embeddings_regrouped = []
start = 0
for size in classes_structure:
pad_size = max_class_size - size
class_embeddings_regrouped.append(F.pad(batched_classes_embeddings[start:start + size], (0, 0, 0, pad_size)).unsqueeze(1))
start += size
class_embeddings = torch.cat(class_embeddings_regrouped, dim=1)
task_embeddings, task_mask = self.get_cached_task_embeddings(tasks_input_ids, tasks_attention_mask)
return (class_embeddings, task_embeddings, task_mask)
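The two language caches are keyed on the non-padded token ids, so identical prompts hit the cache regardless of padding length. A toy sketch of the key construction used by `_get_cache_key_at_index` (token ids are illustrative):

```python
import torch

input_ids = torch.tensor([[101, 2003, 102, 0, 0]])
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
cache_key = tuple(input_ids[0][attention_mask[0] != 0].tolist())
print(cache_key)  # (101, 2003, 102) -- hashable and padding-independent
```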
|
@auto_docstring
class OmDetTurboPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
def linear_init_(module_to_init):
pass
def _set_gradient_checkpointing(self, module, value=False):
pass
@staticmethod
def _get_cache_key_at_index(input_ids, attention_mask, index):
pass
def get_cached_class_embeddings(self, classes_input_ids, classes_attention_mask):
pass
def get_cached_task_embeddings(self, tasks_input_ids, tasks_attention_mask):
pass
def get_language_embedding(self, classes_input_ids, classes_attention_mask, tasks_input_ids, tasks_attention_mask, classes_structure):
pass
| 10
| 0
| 19
| 1
| 17
| 0
| 4
| 0.02
| 1
| 5
| 2
| 2
| 5
| 0
| 6
| 6
| 136
| 16
| 118
| 57
| 102
| 2
| 104
| 49
| 96
| 8
| 1
| 2
| 27
|
4,254
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboRepVggBlock
|
from .configuration_omdet_turbo import OmDetTurboConfig
from ...activations import ACT2CLS, ACT2FN
from torch import Tensor, nn
class OmDetTurboRepVggBlock(nn.Module):
"""
RepVGG architecture block introduced by the work "RepVGG: Making VGG-style ConvNets Great Again".
"""
def __init__(self, config: OmDetTurboConfig):
super().__init__()
activation = config.csp_activation
hidden_channels = int(config.encoder_hidden_dim * config.hidden_expansion)
self.conv1 = OmDetTurboConvNormLayer(config, hidden_channels, hidden_channels, 3, 1, padding=1)
self.conv2 = OmDetTurboConvNormLayer(config, hidden_channels, hidden_channels, 1, 1, padding=0)
self.activation = nn.Identity() if activation is None else ACT2CLS[activation]()
def forward(self, x):
y = self.conv1(x) + self.conv2(x)
return self.activation(y)
|
class OmDetTurboRepVggBlock(nn.Module):
'''
RepVGG architecture block introduced by the work "RepVGG: Making VGG-style ConvNets Great Again".
'''
def __init__(self, config: OmDetTurboConfig):
pass
def forward(self, x):
pass
| 3
| 1
| 6
| 1
| 5
| 0
| 2
| 0.27
| 1
| 4
| 2
| 0
| 2
| 3
| 2
| 12
| 17
| 3
| 11
| 9
| 8
| 3
| 11
| 9
| 8
| 2
| 1
| 0
| 3
|
4,255
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboResidualLayer
|
from torch import Tensor, nn
class OmDetTurboResidualLayer(nn.Module):
"""
A residual connection followed by a layer norm.
"""
def __init__(self, config):
super().__init__()
self.norm1 = nn.LayerNorm(config.class_embed_dim, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.decoder_dropout)
def forward(self, x, y):
return self.norm1(x + self.dropout(y))
|
class OmDetTurboResidualLayer(nn.Module):
'''
A residual connection followed by a layer norm.
'''
def __init__(self, config):
pass
def forward(self, x, y):
pass
| 3
| 1
| 3
| 0
| 3
| 0
| 1
| 0.43
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 12
| 2
| 7
| 5
| 4
| 3
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
4,256
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboTaskEncoder
|
from torch import Tensor, nn
class OmDetTurboTaskEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.mlp = OmDetTurboMLPWithDropout(config)
self.res1 = OmDetTurboResidualLayer(config)
def forward(self, x):
mlp_out = self.mlp(x)
x = self.res1(x, mlp_out)
return x
|
class OmDetTurboTaskEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 3
| 2
| 0
| 2
| 2
| 2
| 12
| 10
| 1
| 9
| 6
| 6
| 0
| 9
| 6
| 6
| 1
| 1
| 0
| 2
|
4,257
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py
|
transformers.models.omdet_turbo.modeling_omdet_turbo.OmDetTurboVisionBackbone
|
from .configuration_omdet_turbo import OmDetTurboConfig
from ...utils.backbone_utils import load_backbone
from torch import Tensor, nn
class OmDetTurboVisionBackbone(nn.Module):
def __init__(self, config: OmDetTurboConfig):
super().__init__()
self.apply_layernorm_after_vision_backbone = config.apply_layernorm_after_vision_backbone
self.vision_backbone = load_backbone(config)
self.layer_norms = nn.ModuleList([nn.LayerNorm(in_channel_dim, eps=config.layer_norm_eps) for in_channel_dim in config.encoder_in_channels])
def forward(self, pixel_values):
outputs = self.vision_backbone(pixel_values).feature_maps
if self.apply_layernorm_after_vision_backbone:
outputs = [layer_norm(output).permute(0, 3, 1, 2).contiguous() for layer_norm, output in zip(self.layer_norms, outputs)]
return outputs
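When `apply_layernorm_after_vision_backbone` is set, the backbone's channels-last feature maps are normalized over the channel dimension and then permuted to NCHW. A toy sketch of that transform (shapes are illustrative):

```python
import torch
from torch import nn

feature_map = torch.randn(2, 7, 7, 96)  # (batch, height, width, channels)
out = nn.LayerNorm(96)(feature_map).permute(0, 3, 1, 2).contiguous()
print(out.shape)  # torch.Size([2, 96, 7, 7])
```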
|
class OmDetTurboVisionBackbone(nn.Module):
def __init__(self, config: OmDetTurboConfig):
pass
def forward(self, pixel_values):
pass
| 3
| 0
| 8
| 1
| 8
| 0
| 2
| 0
| 1
| 3
| 1
| 0
| 2
| 3
| 2
| 12
| 18
| 2
| 16
| 7
| 13
| 0
| 11
| 7
| 8
| 2
| 1
| 1
| 3
|
4,258
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/processing_omdet_turbo.py
|
transformers.models.omdet_turbo.processing_omdet_turbo.DictWithDeprecationWarning
|
import warnings
class DictWithDeprecationWarning(dict):
message = 'The `classes` key is deprecated for `OmDetTurboProcessor.post_process_grounded_object_detection` output dict and will be removed in a 4.51.0 version. Please use `text_labels` instead.'
def __getitem__(self, key):
if key == 'classes':
warnings.warn(self.message, FutureWarning)
return super().__getitem__('text_labels')
return super().__getitem__(key)
def get(self, key, *args, **kwargs):
if key == 'classes':
warnings.warn(self.message, FutureWarning)
return super().get('text_labels', *args, **kwargs)
return super().get(key, *args, **kwargs)
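A usage sketch of the deprecation shim (assumes the class above is in scope):

```python
import warnings

result = DictWithDeprecationWarning({"text_labels": ["cat"], "boxes": []})
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    labels = result["classes"]  # deprecated key, transparently remapped
print(labels, len(caught))      # ['cat'] 1
```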
|
class DictWithDeprecationWarning(dict):
def __getitem__(self, key):
pass
def get(self, key, *args, **kwargs):
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 2
| 0
| 1
| 2
| 0
| 0
| 2
| 0
| 2
| 29
| 17
| 2
| 15
| 4
| 12
| 0
| 12
| 4
| 9
| 2
| 2
| 1
| 4
|
4,259
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/processing_omdet_turbo.py
|
transformers.models.omdet_turbo.processing_omdet_turbo.OmDetTurboProcessor
|
from ...image_utils import ImageInput
from ...utils import TensorType, is_torch_available, is_torchvision_available
from ...processing_utils import ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from typing import TYPE_CHECKING, Optional, Union
from ...feature_extraction_utils import BatchFeature
from ...utils.import_utils import requires
@requires(backends=('vision', 'torchvision'))
class OmDetTurboProcessor(ProcessorMixin):
"""
Constructs an OmDet-Turbo processor which wraps a Deformable DETR image processor and an AutoTokenizer into a
single processor.
[`OmDetTurboProcessor`] offers all the functionalities of [`DetrImageProcessor`] and
[`AutoTokenizer`]. See the docstring of [`~OmDetTurboProcessor.__call__`] and [`~OmDetTurboProcessor.decode`]
for more information.
Args:
image_processor (`DetrImageProcessor`):
An instance of [`DetrImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = ('DetrImageProcessor', 'DetrImageProcessorFast')
tokenizer_class = 'AutoTokenizer'
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[list[str], list[list[str]]]]=None, audio=None, videos=None, **kwargs: Unpack[OmDetTurboProcessorKwargs]) -> BatchFeature:
"""
This method uses the [`DetrImageProcessor.__call__`] method to prepare image(s) for the model, and
[`CLIPTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255.
text (`Union[str, list[str], list[list[str]]]`):
The classes used to limit the scope of the open vocabulary detection. Expects a list of strings or a list
of lists of strings. Batched classes can be of different lengths.
Examples: ["cat", "dog", "bird"], [["cat", "dog", "bird"], ["hat", "person"], ["car"]]
Kwargs:
task (`Union[str, list[str], TextInput, PreTokenizedInput]`):
The grounded text used to guide open vocabulary detection. Expects a single string or a list of strings.
Examples: "Detect a cat, a dog, and a bird.",[ "Detect everything.", "Detect trees and flowers."]
When not provided, the default task is "Detect [class1], [class2], [class3]" etc.
...
"""
if images is None or text is None:
raise ValueError('You have to specify both `images` and `text`')
output_kwargs = self._merge_kwargs(OmDetTurboProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
if isinstance(text, str):
text = text.strip(' ').split(',')
if not (len(text) and isinstance(text[0], (list, tuple))):
text = [text]
task = output_kwargs['text_kwargs'].pop('task', None)
if task is None:
task = ['Detect {}.'.format(', '.join(text_single)) for text_single in text]
elif not isinstance(task, (list, tuple)):
task = [task]
encoding_image_processor = self.image_processor(images, **output_kwargs['images_kwargs'])
tasks_encoding = self.tokenizer(text=task, **output_kwargs['text_kwargs'])
classes = text
classes_structure = torch.tensor([len(class_single) for class_single in classes], dtype=torch.long)
classes_flattened = [class_single for class_batch in classes for class_single in class_batch]
classes_encoding = self.tokenizer(text=classes_flattened, **output_kwargs['text_kwargs'])
encoding = BatchFeature()
encoding.update({f'tasks_{key}': value for key, value in tasks_encoding.items()})
encoding.update({f'classes_{key}': value for key, value in classes_encoding.items()})
encoding.update({'classes_structure': classes_structure})
encoding.update(encoding_image_processor)
return encoding
@property
def model_input_names(self):
image_processor_input_names = self.image_processor.model_input_names
tokenizer_input_names = ['classes_attention_mask', 'tasks_attention_mask', 'tasks_input_ids', 'classes_input_ids', 'classes_structure']
return tokenizer_input_names + image_processor_input_names
def _get_default_image_size(self) -> tuple[int, int]:
height = self.image_processor.size['height'] if 'height' in self.image_processor.size else self.image_processor.size['shortest_edge']
width = self.image_processor.size['width'] if 'width' in self.image_processor.size else self.image_processor.size['longest_edge']
return (height, width)
def post_process_grounded_object_detection(self, outputs: 'OmDetTurboObjectDetectionOutput', text_labels: Optional[Union[list[str], list[list[str]]]]=None, threshold: float=0.3, nms_threshold: float=0.5, target_sizes: Optional[Union[TensorType, list[tuple]]]=None, max_num_det: Optional[int]=None):
"""
Converts the raw output of [`OmDetTurboForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format and get the associated text class.
Args:
outputs ([`OmDetTurboObjectDetectionOutput`]):
Raw outputs of the model.
            text_labels (`Union[list[str], list[list[str]]]`, *optional*):
                The input class names. If not provided, `text_labels` will be set to `None` in `outputs`.
            threshold (`float`, *optional*, defaults to 0.3):
                Only return detections with a confidence score exceeding this threshold.
            nms_threshold (`float`, *optional*, defaults to 0.5):
The threshold to use for box non-maximum suppression. Value in [0, 1].
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
max_num_det (`int`, *optional*):
The maximum number of detections to return.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, classes and boxes for an image
in the batch as predicted by the model.
"""
batch_size = len(outputs.decoder_coord_logits)
if target_sizes is None:
height, width = self._get_default_image_size()
target_sizes = [(height, width)] * batch_size
if any((len(image_size) != 2 for image_size in target_sizes)):
raise ValueError('Each element of target_sizes must contain the size (height, width) of each image of the batch')
if len(target_sizes) != batch_size:
raise ValueError('Make sure that you pass in as many target sizes as output sequences')
if text_labels is not None and isinstance(text_labels[0], str):
text_labels = [text_labels]
if text_labels is not None and len(text_labels) != batch_size:
raise ValueError('Make sure that you pass in as many classes group as output sequences')
if isinstance(target_sizes, torch.Tensor):
target_sizes = target_sizes.tolist()
batch_boxes = outputs.decoder_coord_logits
batch_logits = outputs.decoder_class_logits
batch_num_classes = outputs.classes_structure
batch_scores, batch_labels = compute_score(batch_logits)
results = []
for boxes, scores, image_size, image_num_classes in zip(batch_boxes, batch_scores, target_sizes, batch_num_classes):
boxes, scores, labels = _post_process_boxes_for_image(boxes=boxes, scores=scores, labels=batch_labels, image_num_classes=image_num_classes, image_size=image_size, threshold=threshold, nms_threshold=nms_threshold, max_num_det=max_num_det)
result = DictWithDeprecationWarning({'boxes': boxes, 'scores': scores, 'labels': labels, 'text_labels': None})
results.append(result)
if text_labels is not None:
for result, image_text_labels in zip(results, text_labels):
result['text_labels'] = [image_text_labels[idx] for idx in result['labels']]
return results
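A minimal end-to-end usage sketch of the processor above (not part of the source file); the checkpoint id, image path, and class list are illustrative assumptions:
```python
# Hedged usage sketch for OmDetTurboProcessor; checkpoint name and inputs are assumptions.
import torch
from PIL import Image
from transformers import AutoProcessor, OmDetTurboForObjectDetection

ckpt = "omlab/omdet-turbo-swin-tiny-hf"  # assumed checkpoint id
processor = AutoProcessor.from_pretrained(ckpt)
model = OmDetTurboForObjectDetection.from_pretrained(ckpt)

image = Image.open("cats.png")  # any RGB image
classes = ["cat", "remote"]
inputs = processor(images=image, text=classes, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Map raw logits to (x0, y0, x1, y1) boxes with scores and class names.
results = processor.post_process_grounded_object_detection(
    outputs,
    text_labels=[classes],
    threshold=0.3,
    target_sizes=[image.size[::-1]],  # PIL size is (width, height); reversed -> (height, width)
)
for box, score, label in zip(results[0]["boxes"], results[0]["scores"], results[0]["text_labels"]):
    print(f"{label}: {score:.2f} at {box.tolist()}")
```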
|
@requires(backends=('vision', 'torchvision'))
class OmDetTurboProcessor(ProcessorMixin):
'''
    Constructs an OmDet-Turbo processor which wraps a Deformable DETR image processor and an AutoTokenizer into a
single processor.
[`OmDetTurboProcessor`] offers all the functionalities of [`DetrImageProcessor`] and
[`AutoTokenizer`]. See the docstring of [`~OmDetTurboProcessor.__call__`] and [`~OmDetTurboProcessor.decode`]
for more information.
Args:
image_processor (`DetrImageProcessor`):
An instance of [`DetrImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
            An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
'''
def __init__(self, image_processor, tokenizer):
pass
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[list[str], list[list[str]]]]=None, audio=None, videos=None, **kwargs: Unpack[OmDetTurboProcessorKwargs]) -> BatchFeature:
'''
        This method uses the [`DetrImageProcessor.__call__`] method to prepare image(s) for the model, and
        [`CLIPTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255.
text (`Union[str, list[str], list[list[str]]]`):
The classes used to limit the scope of the open vocabulary detection. Expects a list of strings or a list
of list of strings. Batched classes can be of different lengths.
Examples: ["cat", "dog", "bird"], [["cat", "dog", "bird"], ["hat", "person"], ["car"]]
Kwargs:
task (`Union[str, list[str], TextInput, PreTokenizedInput]`):
The grounded text used to guide open vocabulary detection. Expects a single string or a list of strings.
Examples: "Detect a cat, a dog, and a bird.",[ "Detect everything.", "Detect trees and flowers."]
When not provided, the default task is "Detect [class1], [class2], [class3]" etc.
...
'''
pass
@property
def model_input_names(self):
pass
def _get_default_image_size(self) -> tuple[int, int]:
pass
def post_process_grounded_object_detection(self, outputs: 'OmDetTurboObjectDetectionOutput', text_labels: Optional[Union[list[str], list[list[str]]]]=None, threshold: float=0.3, nms_threshold: float=0.5, target_sizes: Optional[Union[TensorType, list[tuple]]]=None, max_num_det: Optional[int]=None):
'''
Converts the raw output of [`OmDetTurboForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format and get the associated text class.
Args:
outputs ([`OmDetTurboObjectDetectionOutput`]):
Raw outputs of the model.
            text_labels (`Union[list[str], list[list[str]]]`, *optional*):
                The input class names. If not provided, `text_labels` will be set to `None` in `outputs`.
            threshold (`float`, *optional*, defaults to 0.3):
                Only return detections with a confidence score exceeding this threshold.
            nms_threshold (`float`, *optional*, defaults to 0.5):
The threshold to use for box non-maximum suppression. Value in [0, 1].
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
max_num_det (`int`, *optional*):
The maximum number of detections to return.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, classes and boxes for an image
in the batch as predicted by the model.
'''
pass
| 8 | 3 | 30 | 4 | 18 | 9 | 4 | 0.59 | 1 | 12 | 3 | 0 | 6 | 0 | 6 | 23 | 209 | 33 | 111 | 48 | 87 | 65 | 66 | 32 | 59 | 10 | 2 | 2 | 22 |
4,260 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/processing_omdet_turbo.py | transformers.models.omdet_turbo.processing_omdet_turbo.OmDetTurboProcessorKwargs |
from ...processing_utils import ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
class OmDetTurboProcessorKwargs(ProcessingKwargs, total=False):
text_kwargs: OmDetTurboTextKwargs
_defaults = {'text_kwargs': {'add_special_tokens': True, 'padding': 'max_length', 'truncation': True, 'max_length': 77, 'stride': 0, 'return_overflowing_tokens': False, 'return_special_tokens_mask': False, 'return_offsets_mapping': False, 'return_token_type_ids': False, 'return_length': False, 'verbose': True, 'task': None}, 'images_kwargs': {}}
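A short sketch (reusing the `processor` and `image` from the earlier example; not from the source) of how these defaults behave: any keyword passed at call time overrides the corresponding entry in `_defaults` via `_merge_kwargs`:
```python
# Sketch: call-time kwargs override the _defaults declared above.
inputs = processor(
    images=image,
    text=[["cat", "dog"]],
    task="Detect every animal.",  # routed into text_kwargs (see OmDetTurboTextKwargs below)
    padding="longest",            # overrides the 'max_length' default
    return_tensors="pt",
)
```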
|
class OmDetTurboProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0 | 19 | 2 | 18 | 0 | 3 | 2 | 2 | 0 | 3 | 0 | 0 |
4,261 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/omdet_turbo/processing_omdet_turbo.py | transformers.models.omdet_turbo.processing_omdet_turbo.OmDetTurboTextKwargs |
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from typing import TYPE_CHECKING, Optional, Union
class OmDetTurboTextKwargs(TextKwargs, total=False):
task: Optional[Union[str, list[str], TextInput, PreTokenizedInput]]
|
class OmDetTurboTextKwargs(TextKwargs, total=False):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 2 | 0 | 0 |
4,262 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/configuration_oneformer.py | transformers.models.oneformer.configuration_oneformer.OneFormerConfig |
from ..auto import CONFIG_MAPPING
from ...configuration_utils import PretrainedConfig
from typing import Optional
from ...utils.backbone_utils import verify_backbone_config_arguments
class OneFormerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`OneFormerModel`]. It is used to instantiate a
OneFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the OneFormer
[shi-labs/oneformer_ade20k_swin_tiny](https://huggingface.co/shi-labs/oneformer_ade20k_swin_tiny) architecture
trained on [ADE20k-150](https://huggingface.co/datasets/scene_parse_150).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
backbone_config (`PretrainedConfig`, *optional*, defaults to `SwinConfig`):
The configuration of the backbone model.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
ignore_value (`int`, *optional*, defaults to 255):
Values to be ignored in GT label while calculating loss.
num_queries (`int`, *optional*, defaults to 150):
Number of object queries.
no_object_weight (`float`, *optional*, defaults to 0.1):
Weight for no-object class predictions.
class_weight (`float`, *optional*, defaults to 2.0):
Weight for Classification CE loss.
mask_weight (`float`, *optional*, defaults to 5.0):
Weight for binary CE loss.
dice_weight (`float`, *optional*, defaults to 5.0):
Weight for dice loss.
contrastive_weight (`float`, *optional*, defaults to 0.5):
Weight for contrastive loss.
contrastive_temperature (`float`, *optional*, defaults to 0.07):
Initial value for scaling the contrastive logits.
train_num_points (`int`, *optional*, defaults to 12544):
Number of points to sample while calculating losses on mask predictions.
oversample_ratio (`float`, *optional*, defaults to 3.0):
Ratio to decide how many points to oversample.
importance_sample_ratio (`float`, *optional*, defaults to 0.75):
Ratio of points that are sampled via importance sampling.
init_std (`float`, *optional*, defaults to 0.02):
Standard deviation for normal initialization.
init_xavier_std (`float`, *optional*, defaults to 1.0):
Standard deviation for xavier uniform initialization.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
Epsilon for layer normalization.
is_training (`bool`, *optional*, defaults to `False`):
Whether to run in training or inference mode.
use_auxiliary_loss (`bool`, *optional*, defaults to `True`):
Whether to calculate loss using intermediate predictions from transformer decoder.
output_auxiliary_logits (`bool`, *optional*, defaults to `True`):
Whether to return intermediate predictions from transformer decoder.
strides (`list`, *optional*, defaults to `[4, 8, 16, 32]`):
List containing the strides for feature maps in the encoder.
task_seq_len (`int`, *optional*, defaults to 77):
Sequence length for tokenizing text list input.
text_encoder_width (`int`, *optional*, defaults to 256):
Hidden size for text encoder.
text_encoder_context_length (`int`, *optional*, defaults to 77):
Input sequence length for text encoder.
text_encoder_num_layers (`int`, *optional*, defaults to 6):
Number of layers for transformer in text encoder.
text_encoder_vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size for tokenizer.
text_encoder_proj_layers (`int`, *optional*, defaults to 2):
Number of layers in MLP for project text queries.
text_encoder_n_ctx (`int`, *optional*, defaults to 16):
Number of learnable text context queries.
conv_dim (`int`, *optional*, defaults to 256):
Feature map dimension to map outputs from the backbone.
mask_dim (`int`, *optional*, defaults to 256):
Dimension for feature maps in pixel decoder.
hidden_dim (`int`, *optional*, defaults to 256):
Dimension for hidden states in transformer decoder.
encoder_feedforward_dim (`int`, *optional*, defaults to 1024):
Dimension for FFN layer in pixel decoder.
norm (`str`, *optional*, defaults to `"GN"`):
Type of normalization.
encoder_layers (`int`, *optional*, defaults to 6):
Number of layers in pixel decoder.
decoder_layers (`int`, *optional*, defaults to 10):
Number of layers in transformer decoder.
use_task_norm (`bool`, *optional*, defaults to `True`):
Whether to normalize the task token.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads in transformer layers in the pixel and transformer decoders.
dropout (`float`, *optional*, defaults to 0.1):
Dropout probability for pixel and transformer decoders.
dim_feedforward (`int`, *optional*, defaults to 2048):
Dimension for FFN layer in transformer decoder.
pre_norm (`bool`, *optional*, defaults to `False`):
Whether to normalize hidden states before attention layers in transformer decoder.
enforce_input_proj (`bool`, *optional*, defaults to `False`):
Whether to project hidden states in transformer decoder.
query_dec_layers (`int`, *optional*, defaults to 2):
Number of layers in query transformer.
common_stride (`int`, *optional*, defaults to 4):
Common stride used for features in pixel decoder.
Examples:
```python
>>> from transformers import OneFormerConfig, OneFormerModel
>>> # Initializing a OneFormer shi-labs/oneformer_ade20k_swin_tiny configuration
>>> configuration = OneFormerConfig()
>>> # Initializing a model (with random weights) from the shi-labs/oneformer_ade20k_swin_tiny style configuration
>>> model = OneFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'oneformer'
attribute_map = {'hidden_size': 'hidden_dim'}
    def __init__(self, backbone_config: Optional[dict]=None, backbone: Optional[str]=None, use_pretrained_backbone: bool=False, use_timm_backbone: bool=False, backbone_kwargs: Optional[dict]=None, ignore_value: int=255, num_queries: int=150, no_object_weight: float=0.1, class_weight: float=2.0, mask_weight: float=5.0, dice_weight: float=5.0, contrastive_weight: float=0.5, contrastive_temperature: float=0.07, train_num_points: int=12544, oversample_ratio: float=3.0, importance_sample_ratio: float=0.75, init_std: float=0.02, init_xavier_std: float=1.0, layer_norm_eps: float=1e-05, is_training: bool=False, use_auxiliary_loss: bool=True, output_auxiliary_logits: bool=True, strides: Optional[list]=[4, 8, 16, 32], task_seq_len: int=77, text_encoder_width: int=256, text_encoder_context_length: int=77, text_encoder_num_layers: int=6, text_encoder_vocab_size: int=49408, text_encoder_proj_layers: int=2, text_encoder_n_ctx: int=16, conv_dim: int=256, mask_dim: int=256, hidden_dim: int=256, encoder_feedforward_dim: int=1024, norm: str='GN', encoder_layers: int=6, decoder_layers: int=10, use_task_norm: bool=True, num_attention_heads: int=8, dropout: float=0.1, dim_feedforward: int=2048, pre_norm: bool=False, enforce_input_proj: bool=False, query_dec_layers: int=2, common_stride: int=4, **kwargs):
if backbone_config is None and backbone is None:
logger.info('`backbone_config` is unset. Initializing the config with the default `Swin` backbone.')
backbone_config = CONFIG_MAPPING['swin'](image_size=224, num_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=['stage1', 'stage2', 'stage3', 'stage4'])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get('model_type')
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)
self.backbone_config = backbone_config
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.backbone_kwargs = backbone_kwargs
self.ignore_value = ignore_value
self.num_queries = num_queries
self.no_object_weight = no_object_weight
self.class_weight = class_weight
self.mask_weight = mask_weight
self.dice_weight = dice_weight
self.contrastive_weight = contrastive_weight
self.contrastive_temperature = contrastive_temperature
self.train_num_points = train_num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.layer_norm_eps = layer_norm_eps
self.is_training = is_training
self.use_auxiliary_loss = use_auxiliary_loss
self.output_auxiliary_logits = output_auxiliary_logits
self.strides = strides
self.task_seq_len = task_seq_len
self.text_encoder_width = text_encoder_width
self.text_encoder_context_length = text_encoder_context_length
self.text_encoder_num_layers = text_encoder_num_layers
self.text_encoder_vocab_size = text_encoder_vocab_size
self.text_encoder_proj_layers = text_encoder_proj_layers
self.text_encoder_n_ctx = text_encoder_n_ctx
self.conv_dim = conv_dim
self.mask_dim = mask_dim
self.hidden_dim = hidden_dim
self.encoder_feedforward_dim = encoder_feedforward_dim
self.norm = norm
self.encoder_layers = encoder_layers
self.decoder_layers = decoder_layers
self.use_task_norm = use_task_norm
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.dim_feedforward = dim_feedforward
self.pre_norm = pre_norm
self.enforce_input_proj = enforce_input_proj
self.query_dec_layers = query_dec_layers
self.common_stride = common_stride
self.num_hidden_layers = decoder_layers
super().__init__(**kwargs)
@property
def sub_configs(self):
return {'backbone_config': type(self.backbone_config)} if getattr(self, 'backbone_config', None) is not None else {}
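A small sketch (not from the source) of the two `backbone_config` paths handled in `__init__` above: the default tiny-Swin backbone when nothing is given, and the dict round-trip through `CONFIG_MAPPING`:
```python
# Sketch: default backbone creation vs. reviving a backbone config from a plain dict.
from transformers import OneFormerConfig, SwinConfig

config = OneFormerConfig()  # backbone unset -> default tiny-Swin backbone is built internally
assert config.backbone_config.model_type == "swin"

backbone_dict = SwinConfig(depths=[2, 2, 6, 2]).to_dict()
config = OneFormerConfig(backbone_config=backbone_dict)  # dict -> SwinConfig.from_dict(...)
print(config.sub_configs)  # {'backbone_config': <class '...SwinConfig'>}
```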
|
class OneFormerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`OneFormerModel`]. It is used to instantiate a
OneFormer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the OneFormer
[shi-labs/oneformer_ade20k_swin_tiny](https://huggingface.co/shi-labs/oneformer_ade20k_swin_tiny) architecture
trained on [ADE20k-150](https://huggingface.co/datasets/scene_parse_150).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
backbone_config (`PretrainedConfig`, *optional*, defaults to `SwinConfig`):
The configuration of the backbone model.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
ignore_value (`int`, *optional*, defaults to 255):
Values to be ignored in GT label while calculating loss.
num_queries (`int`, *optional*, defaults to 150):
Number of object queries.
no_object_weight (`float`, *optional*, defaults to 0.1):
Weight for no-object class predictions.
class_weight (`float`, *optional*, defaults to 2.0):
Weight for Classification CE loss.
mask_weight (`float`, *optional*, defaults to 5.0):
Weight for binary CE loss.
dice_weight (`float`, *optional*, defaults to 5.0):
Weight for dice loss.
contrastive_weight (`float`, *optional*, defaults to 0.5):
Weight for contrastive loss.
contrastive_temperature (`float`, *optional*, defaults to 0.07):
Initial value for scaling the contrastive logits.
train_num_points (`int`, *optional*, defaults to 12544):
Number of points to sample while calculating losses on mask predictions.
oversample_ratio (`float`, *optional*, defaults to 3.0):
Ratio to decide how many points to oversample.
importance_sample_ratio (`float`, *optional*, defaults to 0.75):
Ratio of points that are sampled via importance sampling.
init_std (`float`, *optional*, defaults to 0.02):
Standard deviation for normal initialization.
init_xavier_std (`float`, *optional*, defaults to 1.0):
Standard deviation for xavier uniform initialization.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
Epsilon for layer normalization.
is_training (`bool`, *optional*, defaults to `False`):
Whether to run in training or inference mode.
use_auxiliary_loss (`bool`, *optional*, defaults to `True`):
Whether to calculate loss using intermediate predictions from transformer decoder.
output_auxiliary_logits (`bool`, *optional*, defaults to `True`):
Whether to return intermediate predictions from transformer decoder.
strides (`list`, *optional*, defaults to `[4, 8, 16, 32]`):
List containing the strides for feature maps in the encoder.
task_seq_len (`int`, *optional*, defaults to 77):
Sequence length for tokenizing text list input.
text_encoder_width (`int`, *optional*, defaults to 256):
Hidden size for text encoder.
text_encoder_context_length (`int`, *optional*, defaults to 77):
Input sequence length for text encoder.
text_encoder_num_layers (`int`, *optional*, defaults to 6):
Number of layers for transformer in text encoder.
text_encoder_vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size for tokenizer.
text_encoder_proj_layers (`int`, *optional*, defaults to 2):
Number of layers in MLP for project text queries.
text_encoder_n_ctx (`int`, *optional*, defaults to 16):
Number of learnable text context queries.
conv_dim (`int`, *optional*, defaults to 256):
Feature map dimension to map outputs from the backbone.
mask_dim (`int`, *optional*, defaults to 256):
Dimension for feature maps in pixel decoder.
hidden_dim (`int`, *optional*, defaults to 256):
Dimension for hidden states in transformer decoder.
encoder_feedforward_dim (`int`, *optional*, defaults to 1024):
Dimension for FFN layer in pixel decoder.
norm (`str`, *optional*, defaults to `"GN"`):
Type of normalization.
encoder_layers (`int`, *optional*, defaults to 6):
Number of layers in pixel decoder.
decoder_layers (`int`, *optional*, defaults to 10):
Number of layers in transformer decoder.
use_task_norm (`bool`, *optional*, defaults to `True`):
Whether to normalize the task token.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads in transformer layers in the pixel and transformer decoders.
dropout (`float`, *optional*, defaults to 0.1):
Dropout probability for pixel and transformer decoders.
dim_feedforward (`int`, *optional*, defaults to 2048):
Dimension for FFN layer in transformer decoder.
pre_norm (`bool`, *optional*, defaults to `False`):
Whether to normalize hidden states before attention layers in transformer decoder.
enforce_input_proj (`bool`, *optional*, defaults to `False`):
Whether to project hidden states in transformer decoder.
query_dec_layers (`int`, *optional*, defaults to 2):
Number of layers in query transformer.
common_stride (`int`, *optional*, defaults to 4):
Common stride used for features in pixel decoder.
Examples:
```python
>>> from transformers import OneFormerConfig, OneFormerModel
>>> # Initializing a OneFormer shi-labs/oneformer_ade20k_swin_tiny configuration
>>> configuration = OneFormerConfig()
>>> # Initializing a model (with random weights) from the shi-labs/oneformer_ade20k_swin_tiny style configuration
>>> model = OneFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
    def __init__(self, backbone_config: Optional[dict]=None, backbone: Optional[str]=None, use_pretrained_backbone: bool=False, use_timm_backbone: bool=False, backbone_kwargs: Optional[dict]=None, ignore_value: int=255, num_queries: int=150, no_object_weight: float=0.1, class_weight: float=2.0, mask_weight: float=5.0, dice_weight: float=5.0, contrastive_weight: float=0.5, contrastive_temperature: float=0.07, train_num_points: int=12544, oversample_ratio: float=3.0, importance_sample_ratio: float=0.75, init_std: float=0.02, init_xavier_std: float=1.0, layer_norm_eps: float=1e-05, is_training: bool=False, use_auxiliary_loss: bool=True, output_auxiliary_logits: bool=True, strides: Optional[list]=[4, 8, 16, 32], task_seq_len: int=77, text_encoder_width: int=256, text_encoder_context_length: int=77, text_encoder_num_layers: int=6, text_encoder_vocab_size: int=49408, text_encoder_proj_layers: int=2, text_encoder_n_ctx: int=16, conv_dim: int=256, mask_dim: int=256, hidden_dim: int=256, encoder_feedforward_dim: int=1024, norm: str='GN', encoder_layers: int=6, decoder_layers: int=10, use_task_norm: bool=True, num_attention_heads: int=8, dropout: float=0.1, dim_feedforward: int=2048, pre_norm: bool=False, enforce_input_proj: bool=False, query_dec_layers: int=2, common_stride: int=4, **kwargs):
pass
@property
def sub_configs(self):
pass
| 4 | 1 | 124 | 3 | 121 | 0 | 3 | 0.92 | 1 | 7 | 0 | 0 | 1 | 46 | 1 | 1 | 247 | 9 | 124 | 100 | 74 | 114 | 58 | 52 | 56 | 3 | 1 | 1 | 3 |
4,263 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/convert_to_hf_oneformer.py | transformers.models.oneformer.convert_to_hf_oneformer.Args |
from dataclasses import dataclass
@dataclass
class Args:
"""Fake command line arguments needed by oneformer/detectron2 implementation"""
config_file: str
|
@dataclass
class Args:
'''Fake command line arguments needed by oneformer/detectron2 implementation'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 1 | 2 | 1 | 1 | 1 | 2 | 1 | 1 | 0 | 0 | 0 | 0 |
4,264 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/convert_to_hf_oneformer.py | transformers.models.oneformer.convert_to_hf_oneformer.OriginalOneFormerCheckpointToOursConverter |
from pprint import pformat
from transformers.models.oneformer.modeling_oneformer import OneFormerConfig, OneFormerForUniversalSegmentation, OneFormerForUniversalSegmentationOutput, OneFormerModel, OneFormerModelOutput
from collections.abc import Iterator
from torch import Tensor, nn
from pathlib import Path
class OriginalOneFormerCheckpointToOursConverter:
def __init__(self, original_model: nn.Module, config: OneFormerConfig):
self.original_model = original_model
self.config = config
def pop_all(self, renamed_keys: list[tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
for src_key, dst_key in renamed_keys:
dst_state_dict[dst_key] = src_state_dict.pop(src_key)
def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig):
dst_prefix: str = 'pixel_level_module.encoder'
src_prefix: str = 'backbone'
renamed_keys = [(f'{src_prefix}.patch_embed.proj.weight', f'{dst_prefix}.embeddings.patch_embeddings.projection.weight'), (f'{src_prefix}.patch_embed.proj.bias', f'{dst_prefix}.embeddings.patch_embeddings.projection.bias'), (f'{src_prefix}.patch_embed.norm.weight', f'{dst_prefix}.embeddings.norm.weight'), (f'{src_prefix}.patch_embed.norm.bias', f'{dst_prefix}.embeddings.norm.bias')]
num_layers = len(config.backbone_config.depths)
for layer_idx in range(num_layers):
for block_idx in range(config.backbone_config.depths[layer_idx]):
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table')])
src_att_weight = src_state_dict[f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight']
src_att_bias = src_state_dict[f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias']
size = src_att_weight.shape[0]
offset = size // 3
dst_state_dict[f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight'] = src_att_weight[:offset, :]
dst_state_dict[f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias'] = src_att_bias[:offset]
dst_state_dict[f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight'] = src_att_weight[offset:offset * 2, :]
dst_state_dict[f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias'] = src_att_bias[offset:offset * 2]
dst_state_dict[f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight'] = src_att_weight[-offset:, :]
dst_state_dict[f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias'] = src_att_bias[-offset:]
src_state_dict.pop(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight')
src_state_dict.pop(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias')
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias')])
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias')])
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight'), (f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias')])
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index', f'{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index')])
if layer_idx < num_layers - 1:
renamed_keys.extend([(f'{src_prefix}.layers.{layer_idx}.downsample.reduction.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.downsample.reduction.weight'), (f'{src_prefix}.layers.{layer_idx}.downsample.norm.weight', f'{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.weight'), (f'{src_prefix}.layers.{layer_idx}.downsample.norm.bias', f'{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.bias')])
renamed_keys.extend([(f'{src_prefix}.norm{layer_idx}.weight', f'{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.weight'), (f'{src_prefix}.norm{layer_idx}.bias', f'{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.bias')])
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_dinat_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig):
dst_prefix: str = 'pixel_level_module.encoder'
src_prefix: str = 'backbone'
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [(f'{src_prefix}.weight', f'{dst_prefix}.weight'), (f'{src_prefix}.bias', f'{dst_prefix}.bias')]
renamed_keys = rename_keys_for_weight_bias(f'{src_prefix}.patch_embed.norm', f'{dst_prefix}.embeddings.norm')
for i in range(2):
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.patch_embed.proj.{i}', f'{dst_prefix}.embeddings.patch_embeddings.projection.{i}'))
num_layers = len(config.backbone_config.depths)
for layer_idx in range(num_layers):
for block_idx in range(config.backbone_config.depths[layer_idx]):
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm1', f'{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_before'))
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm2', f'{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_after'))
renamed_keys.extend([(f'{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.rpb', f'{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.rpb')])
src_att_weight = src_state_dict[f'{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight']
src_att_bias = src_state_dict[f'{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias']
size = src_att_weight.shape[0]
offset = size // 3
dst_state_dict[f'{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.weight'] = src_att_weight[:offset, :]
dst_state_dict[f'{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.bias'] = src_att_bias[:offset]
dst_state_dict[f'{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.weight'] = src_att_weight[offset:offset * 2, :]
dst_state_dict[f'{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.bias'] = src_att_bias[offset:offset * 2]
dst_state_dict[f'{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.weight'] = src_att_weight[-offset:, :]
dst_state_dict[f'{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.bias'] = src_att_bias[-offset:]
src_state_dict.pop(f'{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight')
src_state_dict.pop(f'{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias')
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.proj', f'{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.output.dense'))
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc1', f'{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.intermediate.dense'))
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc2', f'{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.output.dense'))
if layer_idx < num_layers - 1:
renamed_keys.extend([(f'{src_prefix}.levels.{layer_idx}.downsample.reduction.weight', f'{dst_prefix}.encoder.levels.{layer_idx}.downsample.reduction.weight'), (f'{src_prefix}.levels.{layer_idx}.downsample.norm.weight', f'{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.weight'), (f'{src_prefix}.levels.{layer_idx}.downsample.norm.bias', f'{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.bias')])
renamed_keys.extend([(f'{src_prefix}.norm{layer_idx}.weight', f'{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.weight'), (f'{src_prefix}.norm{layer_idx}.bias', f'{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.bias')])
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict, is_swin: bool):
dst_prefix: str = 'pixel_level_module.decoder'
src_prefix: str = 'sem_seg_head.pixel_decoder'
if is_swin:
self.replace_swin_backbone(dst_state_dict, src_state_dict, self.config)
else:
self.replace_dinat_backbone(dst_state_dict, src_state_dict, self.config)
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [(f'{src_prefix}.weight', f'{dst_prefix}.weight'), (f'{src_prefix}.bias', f'{dst_prefix}.bias')]
def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
self_attn_keys = []
self_attn_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.attention_weights', f'{dst_prefix}.attention_weights'))
self_attn_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.output_proj', f'{dst_prefix}.output_proj'))
self_attn_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.sampling_offsets', f'{dst_prefix}.sampling_offsets'))
self_attn_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.value_proj', f'{dst_prefix}.value_proj'))
return self_attn_keys
def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str):
encoder_keys = []
encoder_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.linear1', f'{dst_prefix}.fc1'))
encoder_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.linear2', f'{dst_prefix}.fc2'))
encoder_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.norm1', f'{dst_prefix}.self_attn_layer_norm'))
encoder_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.norm2', f'{dst_prefix}.final_layer_norm'))
encoder_keys.extend(rename_keys_for_self_attn(f'{src_prefix}.self_attn', f'{dst_prefix}.self_attn'))
return encoder_keys
renamed_keys = [(f'{src_prefix}.adapter_1.weight', f'{dst_prefix}.adapter_1.0.weight'), (f'{src_prefix}.adapter_1.norm.weight', f'{dst_prefix}.adapter_1.1.weight'), (f'{src_prefix}.adapter_1.norm.bias', f'{dst_prefix}.adapter_1.1.bias')]
renamed_keys.extend([(f'{src_prefix}.layer_1.weight', f'{dst_prefix}.layer_1.0.weight'), (f'{src_prefix}.layer_1.norm.weight', f'{dst_prefix}.layer_1.1.weight'), (f'{src_prefix}.layer_1.norm.bias', f'{dst_prefix}.layer_1.1.bias')])
for i in range(3):
for j in range(2):
renamed_keys.extend([(f'{src_prefix}.input_proj.{i}.{j}.weight', f'{dst_prefix}.input_projections.{i}.{j}.weight'), (f'{src_prefix}.input_proj.{i}.{j}.bias', f'{dst_prefix}.input_projections.{i}.{j}.bias')])
renamed_keys.extend([(f'{src_prefix}.transformer.level_embed', f'{dst_prefix}.level_embed')])
for layer_idx in range(self.config.encoder_layers):
renamed_keys.extend(rename_keys_for_encoder_layer(f'{src_prefix}.transformer.encoder.layers.{layer_idx}', f'{dst_prefix}.encoder.layers.{layer_idx}'))
renamed_keys.extend([(f'{src_prefix}.mask_features.weight', f'{dst_prefix}.mask_projection.weight'), (f'{src_prefix}.mask_features.bias', f'{dst_prefix}.mask_projection.bias')])
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'transformer_module.decoder.layers'
src_prefix: str = 'sem_seg_head.predictor'
for i in range(self.config.decoder_layers - 1):
in_proj_weight = src_state_dict.pop(f'{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_weight')
in_proj_bias = src_state_dict.pop(f'{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_bias')
dst_state_dict[f'{dst_prefix}.{i}.self_attn.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
dst_state_dict[f'{dst_prefix}.{i}.self_attn.self_attn.q_proj.bias'] = in_proj_bias[:256]
dst_state_dict[f'{dst_prefix}.{i}.self_attn.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
dst_state_dict[f'{dst_prefix}.{i}.self_attn.self_attn.k_proj.bias'] = in_proj_bias[256:512]
dst_state_dict[f'{dst_prefix}.{i}.self_attn.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
dst_state_dict[f'{dst_prefix}.{i}.self_attn.self_attn.v_proj.bias'] = in_proj_bias[-256:]
def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'transformer_module'
src_prefix: str = 'sem_seg_head.predictor'
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [(f'{src_prefix}.weight', f'{dst_prefix}.weight'), (f'{src_prefix}.bias', f'{dst_prefix}.bias')]
def rename_keys_for_attn(src_prefix: str, dst_prefix: str):
attn_keys = [(f'{src_prefix}.in_proj_bias', f'{dst_prefix}.in_proj_bias'), (f'{src_prefix}.in_proj_weight', f'{dst_prefix}.in_proj_weight')]
attn_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.out_proj', f'{dst_prefix}.out_proj'))
return attn_keys
def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
attn_keys = []
attn_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.out_proj', f'{dst_prefix}.out_proj'))
return attn_keys
def rename_keys_for_query_transformer_layer(src_prefix: str, dst_prefix: str):
query_transformer_layer_keys = []
query_transformer_layer_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.linear1', f'{dst_prefix}.linear1'))
query_transformer_layer_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.linear2', f'{dst_prefix}.linear2'))
query_transformer_layer_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.norm1', f'{dst_prefix}.norm1'))
query_transformer_layer_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.norm2', f'{dst_prefix}.norm2'))
query_transformer_layer_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.norm3', f'{dst_prefix}.norm3'))
query_transformer_layer_keys.extend(rename_keys_for_attn(f'{src_prefix}.self_attn', f'{dst_prefix}.self_attn'))
query_transformer_layer_keys.extend(rename_keys_for_attn(f'{src_prefix}.multihead_attn', f'{dst_prefix}.multihead_attn'))
return query_transformer_layer_keys
def rename_keys_for_cross_attn_layer(src_prefix: str, dst_prefix: str):
cross_attn_layer_keys = []
cross_attn_layer_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.norm', f'{dst_prefix}.norm'))
cross_attn_layer_keys.extend(rename_keys_for_attn(f'{src_prefix}.multihead_attn', f'{dst_prefix}.multihead_attn'))
return cross_attn_layer_keys
def rename_keys_for_self_attn_layer(src_prefix: str, dst_prefix: str):
self_attn_layer_keys = []
self_attn_layer_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.norm', f'{dst_prefix}.norm'))
self_attn_layer_keys.extend(rename_keys_for_self_attn(f'{src_prefix}.self_attn', f'{dst_prefix}.self_attn'))
return self_attn_layer_keys
def rename_keys_for_ffn_layer(src_prefix: str, dst_prefix: str):
ffn_layer_keys = []
ffn_layer_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.linear1', f'{dst_prefix}.linear1'))
ffn_layer_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.linear2', f'{dst_prefix}.linear2'))
ffn_layer_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.norm', f'{dst_prefix}.norm'))
return ffn_layer_keys
def rename_keys_for_transformer_decoder_layer(src_prefix: str, dst_prefix: str, idx: int):
transformer_decoder_layer_keys = []
transformer_decoder_layer_keys.extend(rename_keys_for_cross_attn_layer(f'{src_prefix}.transformer_cross_attention_layers.{idx}', f'{dst_prefix}.{idx}.cross_attn'))
transformer_decoder_layer_keys.extend(rename_keys_for_self_attn_layer(f'{src_prefix}.transformer_self_attention_layers.{idx}', f'{dst_prefix}.{idx}.self_attn'))
transformer_decoder_layer_keys.extend(rename_keys_for_ffn_layer(f'{src_prefix}.transformer_ffn_layers.{idx}', f'{dst_prefix}.{idx}.ffn'))
return transformer_decoder_layer_keys
renamed_keys = [(f'{src_prefix}.query_embed.weight', f'{dst_prefix}.queries_embedder.weight'), (f'{src_prefix}.level_embed.weight', f'{dst_prefix}.level_embed.weight')]
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.decoder_norm', f'{dst_prefix}.decoder.decoder_norm'))
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.class_input_proj', f'{dst_prefix}.decoder.query_input_projection'))
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.class_embed', f'{dst_prefix}.decoder.class_embed'))
for i in range(3):
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.mask_embed.layers.{i}', f'{dst_prefix}.decoder.mask_embed.layers.{i}.0'))
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.class_transformer.decoder.norm', f'{dst_prefix}.decoder.query_transformer.decoder.norm'))
for i in range(self.config.query_dec_layers):
renamed_keys.extend(rename_keys_for_query_transformer_layer(f'{src_prefix}.class_transformer.decoder.layers.{i}', f'{dst_prefix}.decoder.query_transformer.decoder.layers.{i}'))
for i in range(self.config.decoder_layers - 1):
renamed_keys.extend(rename_keys_for_transformer_decoder_layer(f'{src_prefix}', f'{dst_prefix}.decoder.layers', i))
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
self.replace_keys_qkv_transformer_decoder(dst_state_dict, src_state_dict)
def replace_task_mlp(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'task_encoder'
src_prefix: str = 'task_mlp'
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [(f'{src_prefix}.weight', f'{dst_prefix}.weight'), (f'{src_prefix}.bias', f'{dst_prefix}.bias')]
renamed_keys = []
for i in range(2):
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.layers.{i}', f'{dst_prefix}.task_mlp.layers.{i}.0'))
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_text_projector(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'text_mapper.text_projector'
src_prefix: str = 'text_projector'
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [(f'{src_prefix}.weight', f'{dst_prefix}.weight'), (f'{src_prefix}.bias', f'{dst_prefix}.bias')]
renamed_keys = []
for i in range(self.config.text_encoder_config['text_encoder_proj_layers']):
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.layers.{i}', f'{dst_prefix}.{i}.0'))
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def replace_text_mapper(self, dst_state_dict: StateDict, src_state_dict: StateDict):
dst_prefix: str = 'text_mapper.text_encoder'
src_prefix: str = 'text_encoder'
self.replace_text_projector(dst_state_dict, src_state_dict)
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
return [(f'{src_prefix}.weight', f'{dst_prefix}.weight'), (f'{src_prefix}.bias', f'{dst_prefix}.bias')]
def rename_keys_for_attn(src_prefix: str, dst_prefix: str):
attn_keys = [(f'{src_prefix}.in_proj_bias', f'{dst_prefix}.in_proj_bias'), (f'{src_prefix}.in_proj_weight', f'{dst_prefix}.in_proj_weight')]
attn_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.out_proj', f'{dst_prefix}.out_proj'))
return attn_keys
def rename_keys_for_layer(src_prefix: str, dst_prefix: str):
resblock_keys = []
resblock_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.mlp.c_fc', f'{dst_prefix}.mlp.fc1'))
resblock_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.mlp.c_proj', f'{dst_prefix}.mlp.fc2'))
resblock_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.ln_1', f'{dst_prefix}.layer_norm1'))
resblock_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.ln_2', f'{dst_prefix}.layer_norm2'))
resblock_keys.extend(rename_keys_for_attn(f'{src_prefix}.attn', f'{dst_prefix}.self_attn'))
return resblock_keys
renamed_keys = [('prompt_ctx.weight', 'text_mapper.prompt_ctx.weight')]
renamed_keys.extend([(f'{src_prefix}.positional_embedding', f'{dst_prefix}.positional_embedding'), (f'{src_prefix}.token_embedding.weight', f'{dst_prefix}.token_embedding.weight')])
renamed_keys.extend(rename_keys_for_weight_bias(f'{src_prefix}.ln_final', f'{dst_prefix}.ln_final'))
for i in range(self.config.text_encoder_config['text_encoder_num_layers']):
renamed_keys.extend(rename_keys_for_layer(f'{src_prefix}.transformer.resblocks.{i}', f'{dst_prefix}.transformer.layers.{i}'))
self.pop_all(renamed_keys, dst_state_dict, src_state_dict)
def convert(self, oneformer: OneFormerModel, is_swin: bool) -> OneFormerModel:
dst_state_dict = TrackedStateDict(oneformer.state_dict())
src_state_dict = self.original_model.state_dict()
self.replace_pixel_module(dst_state_dict, src_state_dict, is_swin)
self.replace_transformer_module(dst_state_dict, src_state_dict)
self.replace_task_mlp(dst_state_dict, src_state_dict)
if self.config.is_training:
self.replace_text_mapper(dst_state_dict, src_state_dict)
logger.info(f'Missed keys are {pformat(dst_state_dict.diff())}')
logger.info(f'Not copied keys are {pformat(src_state_dict.keys())}')
logger.info('🙌 Done')
oneformer.load_state_dict(dst_state_dict)
return oneformer
@staticmethod
    def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[tuple[Path, Path]]:
        checkpoints: Iterator[Path] = checkpoints_dir.glob('**/*.pth')
for checkpoint in checkpoints:
logger.info(f'💪 Converting {checkpoint.stem}')
config: Path = config_dir / f'{checkpoint.stem}.yaml'
yield (config, checkpoint)
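The recurring pattern in `replace_swin_backbone`, `replace_dinat_backbone`, and `replace_keys_qkv_transformer_decoder` above is splitting a fused qkv projection into separate q/k/v tensors; a standalone sketch with dummy tensors:
```python
# Sketch of the fused-qkv split used above: a (3*d, d) projection sliced into q/k/v.
import torch

d = 256  # hidden size; corresponds to offset = size // 3 in the converter
qkv_weight = torch.randn(3 * d, d)
qkv_bias = torch.randn(3 * d)

q_w, k_w, v_w = qkv_weight[:d, :], qkv_weight[d:2 * d, :], qkv_weight[-d:, :]
q_b, k_b, v_b = qkv_bias[:d], qkv_bias[d:2 * d], qkv_bias[-d:]

# The three slices tile the original tensor exactly.
assert torch.equal(torch.cat([q_w, k_w, v_w]), qkv_weight)
```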
|
class OriginalOneFormerCheckpointToOursConverter:
def __init__(self, original_model: nn.Module, config: OneFormerConfig):
pass
def pop_all(self, renamed_keys: list[tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig):
pass
def replace_dinat_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig):
pass
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
pass
def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict, is_swin: bool):
pass
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str):
pass
def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_attn(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_query_transformer_layer(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_cross_attn_layer(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_self_attn_layer(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_ffn_layer(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_transformer_decoder_layer(src_prefix: str, dst_prefix: str, idx: int):
pass
def replace_task_mlp(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
pass
def replace_text_projector(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
pass
def replace_text_mapper(self, dst_state_dict: StateDict, src_state_dict: StateDict):
pass
def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_attn(src_prefix: str, dst_prefix: str):
pass
def rename_keys_for_layer(src_prefix: str, dst_prefix: str):
pass
def convert(self, oneformer: OneFormerModel, is_swin: bool) -> OneFormerModel:
pass
@staticmethod
    def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[tuple[Path, Path]]:
pass
| 31 | 0 | 29 | 4 | 24 | 1 | 2 | 0.06 | 0 | 9 | 1 | 0 | 11 | 2 | 12 | 12 | 693 | 112 | 549 | 98 | 518 | 34 | 232 | 97 | 202 | 5 | 0 | 2 | 50 |
4,265 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/convert_to_hf_oneformer.py | transformers.models.oneformer.convert_to_hf_oneformer.OriginalOneFormerConfigToOursConverter |
from transformers import CLIPTokenizer, DinatConfig, SwinConfig
from transformers.models.oneformer.modeling_oneformer import OneFormerConfig, OneFormerForUniversalSegmentation, OneFormerForUniversalSegmentationOutput, OneFormerModel, OneFormerModelOutput
class OriginalOneFormerConfigToOursConverter:
def __call__(self, original_config: object, is_swin: bool) -> OneFormerConfig:
model = original_config.MODEL
dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0])
id2label = dict(enumerate(dataset_catalog.stuff_classes))
label2id = {label: idx for idx, label in id2label.items()}
if is_swin:
if model.SWIN.EMBED_DIM == 96:
backbone_config = SwinConfig.from_pretrained('microsoft/swin-tiny-patch4-window7-224', drop_path_rate=model.SWIN.DROP_PATH_RATE, out_features=['stage1', 'stage2', 'stage3', 'stage4'])
elif model.SWIN.EMBED_DIM == 192:
backbone_config = SwinConfig.from_pretrained('microsoft/swin-large-patch4-window12-384', drop_path_rate=model.SWIN.DROP_PATH_RATE, out_features=['stage1', 'stage2', 'stage3', 'stage4'])
else:
raise ValueError(f'embed dim {model.SWIN.EMBED_DIM} not supported for Swin!')
else:
backbone_config = DinatConfig.from_pretrained('shi-labs/dinat-large-11x11-in22k-in1k-384', dilations=model.DiNAT.DILATIONS, kernel_size=model.DiNAT.KERNEL_SIZE, out_features=['stage1', 'stage2', 'stage3', 'stage4'])
config: OneFormerConfig = OneFormerConfig(backbone_config=backbone_config, output_attentions=True, output_hidden_states=True, return_dict=True, ignore_value=model.SEM_SEG_HEAD.IGNORE_VALUE, num_classes=model.SEM_SEG_HEAD.NUM_CLASSES, num_queries=model.ONE_FORMER.NUM_OBJECT_QUERIES, no_object_weight=model.ONE_FORMER.NO_OBJECT_WEIGHT, class_weight=model.ONE_FORMER.CLASS_WEIGHT, mask_weight=model.ONE_FORMER.MASK_WEIGHT, dice_weight=model.ONE_FORMER.DICE_WEIGHT, contrastive_weight=model.ONE_FORMER.CONTRASTIVE_WEIGHT, contrastive_temperature=model.ONE_FORMER.CONTRASTIVE_TEMPERATURE, train_num_points=model.ONE_FORMER.TRAIN_NUM_POINTS, oversample_ratio=model.ONE_FORMER.OVERSAMPLE_RATIO, importance_sample_ratio=model.ONE_FORMER.IMPORTANCE_SAMPLE_RATIO, init_std=0.02, init_xavier_std=1.0, layer_norm_eps=1e-05, is_training=False, use_auxiliary_loss=model.ONE_FORMER.DEEP_SUPERVISION, output_auxiliary_logits=True, strides=[4, 8, 16, 32], task_seq_len=original_config.INPUT.TASK_SEQ_LEN, max_seq_len=original_config.INPUT.MAX_SEQ_LEN, text_encoder_width=model.TEXT_ENCODER.WIDTH, text_encoder_context_length=model.TEXT_ENCODER.CONTEXT_LENGTH, text_encoder_num_layers=model.TEXT_ENCODER.NUM_LAYERS, text_encoder_vocab_size=model.TEXT_ENCODER.VOCAB_SIZE, text_encoder_proj_layers=model.TEXT_ENCODER.PROJ_NUM_LAYERS, text_encoder_n_ctx=model.TEXT_ENCODER.N_CTX, conv_dim=model.SEM_SEG_HEAD.CONVS_DIM, mask_dim=model.SEM_SEG_HEAD.MASK_DIM, hidden_dim=model.ONE_FORMER.HIDDEN_DIM, norm=model.SEM_SEG_HEAD.NORM, encoder_layers=model.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS, encoder_feedforward_dim=1024, decoder_layers=model.ONE_FORMER.DEC_LAYERS, use_task_norm=model.ONE_FORMER.USE_TASK_NORM, num_attention_heads=model.ONE_FORMER.NHEADS, dropout=model.ONE_FORMER.DROPOUT, dim_feedforward=model.ONE_FORMER.DIM_FEEDFORWARD, pre_norm=model.ONE_FORMER.PRE_NORM, enforce_input_proj=model.ONE_FORMER.ENFORCE_INPUT_PROJ, query_dec_layers=model.ONE_FORMER.CLASS_DEC_LAYERS, common_stride=model.SEM_SEG_HEAD.COMMON_STRIDE, id2label=id2label, label2id=label2id)
return config
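A minimal usage sketch (not part of the original script): assuming `original_cfg` is the detectron2 `CfgNode` already loaded for the checkpoint being converted, the converter yields an `OneFormerConfig` that can instantiate a randomly initialised HF model ready to receive the ported weights.

converter = OriginalOneFormerConfigToOursConverter()
# `original_cfg` is assumed to exist: the detectron2 CfgNode of the original checkpoint,
# loaded elsewhere with detectron2's get_cfg() plus OneFormer's own config extensions.
hf_config = converter(original_cfg, is_swin=True)  # is_swin=False selects the DiNaT backbone branch
model = OneFormerForUniversalSegmentation(hf_config)  # empty HF model to load the converted weights into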
|
class OriginalOneFormerConfigToOursConverter:
def __call__(self, original_config: object, is_swin: bool) -> OneFormerConfig:
pass
| 2
| 0
| 82
| 4
| 78
| 0
| 4
| 0
| 0
| 8
| 0
| 0
| 1
| 0
| 1
| 1
| 83
| 4
| 79
| 8
| 77
| 0
| 14
| 8
| 12
| 4
| 0
| 2
| 4
|
4,266
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/convert_to_hf_oneformer.py
|
transformers.models.oneformer.convert_to_hf_oneformer.OriginalOneFormerConfigToProcessorConverter
|
from transformers import CLIPTokenizer, DinatConfig, SwinConfig
import torch
from transformers.models.oneformer.processing_oneformer import OneFormerProcessor
from transformers.models.oneformer.image_processing_oneformer import OneFormerImageProcessor
from detectron2.data import MetadataCatalog
class OriginalOneFormerConfigToProcessorConverter:
def __call__(self, original_config: object, model_repo: str) -> OneFormerProcessor:
model = original_config.MODEL
model_input = original_config.INPUT
dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0])
if 'ade20k' in model_repo:
class_info_file = 'ade20k_panoptic.json'
elif 'coco' in model_repo:
class_info_file = 'coco_panoptic.json'
elif 'cityscapes' in model_repo:
class_info_file = 'cityscapes_panoptic.json'
else:
raise ValueError('Invalid Dataset!')
image_processor = OneFormerImageProcessor(image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(), image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(), size=model_input.MIN_SIZE_TEST, max_size=model_input.MAX_SIZE_TEST, num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, ignore_index=dataset_catalog.ignore_label, class_info_file=class_info_file)
tokenizer = CLIPTokenizer.from_pretrained(model_repo)
return OneFormerProcessor(image_processor=image_processor, tokenizer=tokenizer, task_seq_length=original_config.INPUT.TASK_SEQ_LEN, max_seq_length=original_config.INPUT.MAX_SEQ_LEN)
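A hedged sketch of how the processor converter might be exercised (the repo id below is only a plausible example, and `original_cfg` is again assumed to be the original detectron2 CfgNode):

from PIL import Image

processor = OriginalOneFormerConfigToProcessorConverter()(original_cfg, 'shi-labs/oneformer_ade20k_swin_tiny')
image = Image.new('RGB', (640, 480))  # placeholder image
inputs = processor(images=image, task_inputs=['panoptic'], return_tensors='pt')
print(inputs['pixel_values'].shape, inputs['task_inputs'])  # padded pixel tensor and tokenized task prompt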
|
class OriginalOneFormerConfigToProcessorConverter:
def __call__(self, original_config: object, model_repo: str) -> OneFormerProcessor:
pass
| 2
| 0
| 32
| 4
| 28
| 0
| 4
| 0
| 0
| 6
| 0
| 0
| 1
| 0
| 1
| 1
| 33
| 4
| 29
| 8
| 27
| 0
| 13
| 8
| 11
| 4
| 0
| 1
| 4
|
4,267
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/convert_to_hf_oneformer.py
|
transformers.models.oneformer.convert_to_hf_oneformer.TrackedStateDict
|
from typing import Any
class TrackedStateDict:
def __init__(self, to_track: dict):
"""This class "tracks" a python dictionary by keeping track of which item is accessed.
Args:
to_track (Dict): The dictionary we wish to track
"""
self.to_track = to_track
self._seen: set[str] = set()
def __getitem__(self, key: str) -> Any:
return self.to_track[key]
def __setitem__(self, key: str, item: Any):
self._seen.add(key)
self.to_track[key] = item
def diff(self) -> list[str]:
"""This method returns a set difference between the keys in the tracked state dict and the one we have access so far.
This is an effective method to check if we have update all the keys
Returns:
list[str]: List of keys not yet updated
"""
return set(self.to_track.keys()) - self._seen
def copy(self) -> dict:
return self.to_track.copy()
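A self-contained example of the tracking behaviour (note that only `__setitem__` marks a key as seen, so `diff()` reports the keys that were never written during conversion):

state = TrackedStateDict({'backbone.weight': 1.0, 'head.weight': 2.0})
state['backbone.weight'] = 3.0  # writing marks the key as seen
_ = state['head.weight']        # reading does not
print(state.diff())             # {'head.weight'} -> keys still left to update
print(state.copy())             # {'backbone.weight': 3.0, 'head.weight': 2.0}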
|
class TrackedStateDict:
def __init__(self, to_track: dict):
'''This class "tracks" a python dictionary by keeping track of which item is accessed.
Args:
to_track (Dict): The dictionary we wish to track
'''
pass
def __getitem__(self, key: str) -> Any:
pass
def __setitem__(self, key: str, item: Any):
pass
def diff(self) -> list[str]:
'''This method returns the set difference between the keys in the tracked state dict and the ones we have accessed so far.
This is an effective way to check whether we have updated all the keys.
Returns:
list[str]: List of keys not yet updated
'''
pass
def copy(self) -> dict:
pass
| 6
| 2
| 5
| 0
| 2
| 2
| 1
| 0.77
| 0
| 3
| 0
| 0
| 5
| 2
| 5
| 5
| 29
| 6
| 13
| 8
| 7
| 10
| 13
| 8
| 7
| 1
| 0
| 0
| 5
|
4,268
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/image_processing_oneformer.py
|
transformers.models.oneformer.image_processing_oneformer.OneFormerImageProcessor
|
import numpy as np
from ...utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, TensorType, filter_out_non_signature_kwargs, is_torch_available, is_torch_tensor, logging
from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from typing import Any, Optional, Union
from collections.abc import Iterable
from ...image_processing_utils import INIT_SERVICE_KWARGS, BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import PaddingMode, get_resize_output_image_size, pad, rescale, resize, to_channel_dimension_format
class OneFormerImageProcessor(BaseImageProcessor):
"""
Constructs a OneFormer image processor. The image processor can be used to prepare image(s), task input(s) and
optional text inputs and targets for the model.
This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the input to a certain `size`.
size (`int`, *optional*, defaults to 800):
Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a
sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of
the image will be matched to this number. i.e., if `height > width`, then the image will be rescaled to `(size *
height / width, size)`.
resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,
`PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,
`PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set
to `True`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input to a certain `scale`.
rescale_factor (`float`, *optional*, defaults to `1 / 255`):
Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.
image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the
ImageNet std.
ignore_index (`int`, *optional*):
Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
denoted with 0 (background) will be replaced with `ignore_index`.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k).
The background label will be replaced by `ignore_index`.
repo_path (`str`, *optional*, defaults to `"shi-labs/oneformer_demo"`):
Path to hub repo or local directory containing the JSON file with class information for the dataset.
If unset, will look for `class_info_file` in the current working directory.
class_info_file (`str`, *optional*):
JSON file containing class information for the dataset. See `shi-labs/oneformer_demo/cityscapes_panoptic.json` for an example.
num_text (`int`, *optional*):
Number of text entries in the text input list.
num_labels (`int`, *optional*):
The number of labels in the segmentation map.
"""
model_input_names = ['pixel_values', 'pixel_mask', 'task_inputs']
@filter_out_non_signature_kwargs(extra=['max_size', 'metadata', *INIT_SERVICE_KWARGS])
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, repo_path: Optional[str]='shi-labs/oneformer_demo', class_info_file: Optional[str]=None, num_text: Optional[int]=None, num_labels: Optional[int]=None, **kwargs):
super().__init__(**kwargs)
self._max_size = kwargs.pop('max_size', 1333)
size = size if size is not None else {'shortest_edge': 800, 'longest_edge': self._max_size}
size = get_size_dict(size, max_size=self._max_size, default_to_square=False)
if class_info_file is None:
raise ValueError('You must provide a `class_info_file`')
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.ignore_index = ignore_index
self.do_reduce_labels = do_reduce_labels
self.class_info_file = class_info_file
self.repo_path = repo_path
self.metadata = prepare_metadata(load_metadata(repo_path, class_info_file))
self.num_text = num_text
self.num_labels = num_labels
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary. This method calls the superclass method and then removes the
`_max_size` attribute from the dictionary.
"""
image_processor_dict = super().to_dict()
image_processor_dict.pop('_max_size', None)
return image_processor_dict
@filter_out_non_signature_kwargs(extra=['max_size'])
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
"""
max_size = kwargs.pop('max_size', None)
size = get_size_dict(size, max_size=max_size, default_to_square=False)
if 'shortest_edge' in size and 'longest_edge' in size:
size, max_size = (size['shortest_edge'], size['longest_edge'])
elif 'height' in size and 'width' in size:
size = (size['height'], size['width'])
max_size = None
else:
raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.")
size = get_oneformer_resize_output_image_size(image=image, size=size, max_size=max_size, default_to_square=False, input_data_format=input_data_format)
image = resize(image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format)
return image
def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Rescale the image by the given factor. image = image * rescale_factor.
Args:
image (`np.ndarray`):
Image to rescale.
rescale_factor (`float`):
The value to use for rescaling.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. If unset, is inferred from the input image. Can be
one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
"""
return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)
def convert_segmentation_map_to_binary_masks(self, segmentation_map: 'np.ndarray', instance_id_to_semantic_id: Optional[dict[int, int]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False):
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
ignore_index = ignore_index if ignore_index is not None else self.ignore_index
return convert_segmentation_map_to_binary_masks(segmentation_map=segmentation_map, instance_id_to_semantic_id=instance_id_to_semantic_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels)
def __call__(self, images, task_inputs=None, segmentation_maps=None, **kwargs) -> BatchFeature:
return self.preprocess(images, task_inputs=task_inputs, segmentation_maps=segmentation_maps, **kwargs)
def _preprocess(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
if do_resize:
image = self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)
return image
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single image."""
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(image=image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_mask(self, segmentation_map: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single mask."""
segmentation_map = to_numpy_array(segmentation_map)
if segmentation_map.ndim == 2:
added_channel_dim = True
segmentation_map = segmentation_map[None, ...]
input_data_format = ChannelDimension.FIRST
else:
added_channel_dim = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
segmentation_map = self._preprocess(image=segmentation_map, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, do_rescale=False, do_normalize=False, input_data_format=input_data_format)
if added_channel_dim:
segmentation_map = segmentation_map.squeeze(0)
return segmentation_map
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, task_inputs: Optional[list[str]]=None, segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[dict[int, int]]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
if task_inputs is None:
task_inputs = ['panoptic']
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False, max_size=self._max_size)
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
ignore_index = ignore_index if ignore_index is not None else self.ignore_index
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
if segmentation_maps is not None and (not valid_images(segmentation_maps)):
raise ValueError('Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if segmentation_maps is not None and len(images) != len(segmentation_maps):
raise ValueError('Images and segmentation maps must have the same length.')
images = [self._preprocess_image(image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) for image in images]
if segmentation_maps is not None:
segmentation_maps = [self._preprocess_mask(segmentation_map, do_resize, size, input_data_format=input_data_format) for segmentation_map in segmentation_maps]
encoded_inputs = self.encode_inputs(images, task_inputs, segmentation_maps, instance_id_to_semantic_id, ignore_index, do_reduce_labels, return_tensors, input_data_format=data_format)
return encoded_inputs
def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Pad an image with zeros to the given size.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
return padded_image
def pad(self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
"""
Pads a batch of images to the bottom and right of the image with zeros to the size of the largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
image (`np.ndarray`):
Image to pad.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
pad_size = get_max_height_width(images, input_data_format=input_data_format)
padded_images = [self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) for image in images]
data = {'pixel_values': padded_images}
if return_pixel_mask:
masks = [make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) for image in images]
data['pixel_mask'] = masks
return BatchFeature(data=data, tensor_type=return_tensors)
def get_semantic_annotations(self, label, num_class_obj):
annotation_classes = label['classes']
annotation_masks = label['masks']
texts = ['a semantic photo'] * self.num_text
classes = []
masks = []
for idx in range(len(annotation_classes)):
class_id = annotation_classes[idx]
mask = annotation_masks[idx]
if not np.all(mask is False):
if class_id not in classes:
cls_name = self.metadata[str(class_id)]
classes.append(class_id)
masks.append(mask)
num_class_obj[cls_name] += 1
else:
idx = classes.index(class_id)
masks[idx] += mask
masks[idx] = np.clip(masks[idx], 0, 1)
num = 0
for i, cls_name in enumerate(self.metadata['class_names']):
if num_class_obj[cls_name] > 0:
for _ in range(num_class_obj[cls_name]):
if num >= len(texts):
break
texts[num] = f'a photo with a {cls_name}'
num += 1
classes = np.array(classes)
masks = np.array(masks)
return (classes, masks, texts)
def get_instance_annotations(self, label, num_class_obj):
annotation_classes = label['classes']
annotation_masks = label['masks']
texts = ['an instance photo'] * self.num_text
classes = []
masks = []
for idx in range(len(annotation_classes)):
class_id = annotation_classes[idx]
mask = annotation_masks[idx]
if class_id in self.metadata['thing_ids']:
if not np.all(mask is False):
cls_name = self.metadata[str(class_id)]
classes.append(class_id)
masks.append(mask)
num_class_obj[cls_name] += 1
num = 0
for i, cls_name in enumerate(self.metadata['class_names']):
if num_class_obj[cls_name] > 0:
for _ in range(num_class_obj[cls_name]):
if num >= len(texts):
break
texts[num] = f'a photo with a {cls_name}'
num += 1
classes = np.array(classes)
masks = np.array(masks)
return (classes, masks, texts)
def get_panoptic_annotations(self, label, num_class_obj):
annotation_classes = label['classes']
annotation_masks = label['masks']
texts = ['an panoptic photo'] * self.num_text
classes = []
masks = []
for idx in range(len(annotation_classes)):
class_id = annotation_classes[idx]
mask = annotation_masks[idx].data
if not np.all(mask is False):
cls_name = self.metadata[str(class_id)]
classes.append(class_id)
masks.append(mask)
num_class_obj[cls_name] += 1
num = 0
for i, cls_name in enumerate(self.metadata['class_names']):
if num_class_obj[cls_name] > 0:
for _ in range(num_class_obj[cls_name]):
if num >= len(texts):
break
texts[num] = f'a photo with a {cls_name}'
num += 1
classes = np.array(classes)
masks = np.array(masks)
return (classes, masks, texts)
def encode_inputs(self, pixel_values_list: list[ImageInput], task_inputs: list[str], segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Pad images up to the largest image in a batch and create a corresponding `pixel_mask`.
OneFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps
will be converted to lists of binary masks and their respective labels. Let's see an example, assuming
`segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels =
[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for
each mask.
Args:
pixel_values_list (`list[ImageInput]`):
List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height,
width)`.
task_inputs (`list[str]`):
List of task values.
segmentation_maps (`ImageInput`, *optional*):
The corresponding semantic segmentation maps with the pixel-wise annotations.
(`bool`, *optional*, defaults to `True`):
Whether or not to pad images up to the largest image in a batch and create a pixel mask.
If left to the default, will return a pixel mask that is:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*):
A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an
instance segmentation map where each pixel represents an instance id. Can be provided as a single
dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map
instance ids in each image separately.
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`
objects.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input
image.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model.
- **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in
`self.model_input_names`).
- **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model
(when `annotations` are provided).
- **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when
`annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of
`mask_labels[i][j]` is `class_labels[i][j]`.
- **text_inputs** -- Optional list of text string entries to be fed to a model (when `annotations` are
provided). They identify the binary masks present in the image.
"""
ignore_index = self.ignore_index if ignore_index is None else ignore_index
do_reduce_labels = self.do_reduce_labels if do_reduce_labels is None else do_reduce_labels
pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list]
if input_data_format is None:
input_data_format = infer_channel_dimension_format(pixel_values_list[0])
pad_size = get_max_height_width(pixel_values_list, input_data_format=input_data_format)
encoded_inputs = self.pad(pixel_values_list, return_tensors=return_tensors, input_data_format=input_data_format)
annotations = None
if segmentation_maps is not None:
segmentation_maps = map(np.array, segmentation_maps)
annotations = []
for idx, segmentation_map in enumerate(segmentation_maps):
if isinstance(instance_id_to_semantic_id, list):
instance_id = instance_id_to_semantic_id[idx]
else:
instance_id = instance_id_to_semantic_id
masks, classes = self.convert_segmentation_map_to_binary_masks(segmentation_map, instance_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels)
annotations.append({'masks': masks, 'classes': classes})
if annotations is not None:
mask_labels = []
class_labels = []
text_inputs = []
num_class_obj = {}
for cls_name in self.metadata['class_names']:
num_class_obj[cls_name] = 0
for i, label in enumerate(annotations):
task = task_inputs[i]
if task == 'semantic':
classes, masks, texts = self.get_semantic_annotations(label, num_class_obj)
elif task == 'instance':
classes, masks, texts = self.get_instance_annotations(label, num_class_obj)
elif task == 'panoptic':
classes, masks, texts = self.get_panoptic_annotations(label, num_class_obj)
else:
raise ValueError(f'{task} was not expected, expected `semantic`, `instance` or `panoptic`')
masks = [mask[None, ...] for mask in masks]
masks = [self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index) for mask in masks]
masks = np.concatenate(masks, axis=0)
mask_labels.append(torch.from_numpy(masks))
class_labels.append(torch.from_numpy(classes).long())
text_inputs.append(texts)
encoded_inputs['mask_labels'] = mask_labels
encoded_inputs['class_labels'] = class_labels
encoded_inputs['text_inputs'] = text_inputs
encoded_inputs['task_inputs'] = [f'the task is {task_input}' for task_input in task_inputs]
return encoded_inputs
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple[int, int]]]=None) -> 'torch.Tensor':
"""
Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports
PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentation`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
Returns:
`list[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
"""
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
masks_probs = masks_queries_logits.sigmoid()
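# The einsum below marginalises over the query dimension: each pixel's score for class c is the sum over
# queries of (probability the query predicts class c) * (that query's mask probability at the pixel),
# yielding per-class maps of shape (batch, num_classes, height, width).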
segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)
batch_size = class_queries_logits.shape[0]
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
semantic_segmentation = []
for idx in range(batch_size):
resized_logits = torch.nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = segmentation.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
def post_process_instance_segmentation(self, outputs, task_type: str='instance', is_demo: bool=True, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, target_sizes: Optional[list[tuple[int, int]]]=None, return_coco_annotation: Optional[bool]=False):
"""
Converts the output of [`OneFormerForUniversalSegmentationOutput`] into image instance segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`OneFormerForUniversalSegmentationOutput`]):
The outputs from [`OneFormerForUniversalSegmentationOutput`].
task_type (`str`, *optional*, defaults to "instance"):
The post processing depends on the task token input. If the `task_type` is "panoptic", we need to
ignore the stuff predictions.
is_demo (`bool`, *optional*, defaults to `True`):
Whether the model is in demo mode. If true, use threshold to predict final masks.
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
return_coco_annotation (`bool`, *optional*, defaults to `False`):
Whether to return predictions in COCO format.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
to `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits
device = masks_queries_logits.device
batch_size = class_queries_logits.shape[0]
num_queries = class_queries_logits.shape[1]
num_classes = class_queries_logits.shape[-1] - 1
results: list[dict[str, torch.Tensor]] = []
for i in range(batch_size):
scores = torch.nn.functional.softmax(class_queries_logits[i], dim=-1)[:, :-1]
labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
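# Flatten the (num_queries, num_classes) score matrix and keep the top num_queries (query, class) pairs;
# indexing `labels` recovers the class of each pair, and the floor division below recovers the query index.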
scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False)
labels_per_image = labels[topk_indices]
topk_indices = torch.div(topk_indices, num_classes, rounding_mode='floor')
mask_pred = masks_queries_logits[i][topk_indices]
if is_demo:
keep = scores_per_image > threshold
scores_per_image = scores_per_image[keep]
labels_per_image = labels_per_image[keep]
mask_pred = mask_pred[keep]
if task_type == 'panoptic':
keep = torch.zeros_like(scores_per_image).bool()
for j, lab in enumerate(labels_per_image):
keep[j] = lab in self.metadata['thing_ids']
scores_per_image = scores_per_image[keep]
labels_per_image = labels_per_image[keep]
mask_pred = mask_pred[keep]
if mask_pred.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_pred.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({'segmentation': segmentation, 'segments_info': []})
continue
if 'ade20k' in self.class_info_file and (not is_demo) and ('instance' in task_type):
for j in range(labels_per_image.shape[0]):
labels_per_image[j] = self.metadata['thing_ids'].index(labels_per_image[j].item())
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(mask_pred, scores_per_image, labels_per_image, mask_threshold, overlap_mask_area_threshold, set(), target_size)
if return_coco_annotation:
segmentation = convert_segmentation_to_rle(segmentation)
results.append({'segmentation': segmentation, 'segments_info': segments})
return results
def post_process_panoptic_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[set[int]]=None, target_sizes: Optional[list[tuple[int, int]]]=None) -> list[dict]:
"""
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentationOutput`]):
The outputs from [`MaskFormerForInstanceSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
The labels in this set will have all their instances fused together. For instance, we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
to `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if label_ids_to_fuse is None:
logger.warning('`label_ids_to_fuse` unset. No instance will be fused.')
label_ids_to_fuse = set()
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
mask_probs = masks_queries_logits.sigmoid()
pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
results: list[dict[str, TensorType]] = []
for i in range(batch_size):
mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels)
if mask_probs_item.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({'segmentation': segmentation, 'segments_info': []})
continue
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size)
results.append({'segmentation': segmentation, 'segments_info': segments})
return results
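A hedged end-to-end sketch tying the preprocessing and post-processing together (the checkpoint id is a plausible hub example, not something defined in this file):

import torch
from PIL import Image
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation

checkpoint = 'shi-labs/oneformer_ade20k_swin_tiny'  # assumed example checkpoint
processor = OneFormerProcessor.from_pretrained(checkpoint)
model = OneFormerForUniversalSegmentation.from_pretrained(checkpoint)

image = Image.new('RGB', (640, 480))  # placeholder image
inputs = processor(images=image, task_inputs=['semantic'], return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)

# the processor delegates to the image processor defined above
semantic_map = processor.image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(semantic_map.shape)  # torch.Size([480, 640]), one semantic class id per pixel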
|
class OneFormerImageProcessor(BaseImageProcessor):
'''
Constructs a OneFormer image processor. The image processor can be used to prepare image(s), task input(s) and
optional text inputs and targets for the model.
This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the input to a certain `size`.
size (`int`, *optional*, defaults to 800):
Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a
sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of
the image will be matched to this number. i.e., if `height > width`, then the image will be rescaled to `(size *
height / width, size)`.
resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,
`PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,
`PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set
to `True`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input to a certain `scale`.
rescale_factor (`float`, *optional*, defaults to `1 / 255`):
Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.
image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the
ImageNet std.
ignore_index (`int`, *optional*):
Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
denoted with 0 (background) will be replaced with `ignore_index`.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k).
The background label will be replaced by `ignore_index`.
repo_path (`str`, *optional*, defaults to `"shi-labs/oneformer_demo"`):
Path to hub repo or local directory containing the JSON file with class information for the dataset.
If unset, will look for `class_info_file` in the current working directory.
class_info_file (`str`, *optional*):
JSON file containing class information for the dataset. See `shi-labs/oneformer_demo/cityscapes_panoptic.json` for an example.
num_text (`int`, *optional*):
Number of text entries in the text input list.
num_labels (`int`, *optional*):
The number of labels in the segmentation map.
'''
@filter_out_non_signature_kwargs(extra=['max_size', 'metadata', *INIT_SERVICE_KWARGS])
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, repo_path: Optional[str]='shi-labs/oneformer_demo', class_info_file: Optional[str]=None, num_text: Optional[int]=None, num_labels: Optional[int]=None, **kwargs):
pass
def to_dict(self) -> dict[str, Any]:
'''
Serializes this instance to a Python dictionary. This method calls the superclass method and then removes the
`_max_size` attribute from the dictionary.
'''
pass
@filter_out_non_signature_kwargs(extra=['max_size'])
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
'''
pass
def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Rescale the image by the given factor. image = image * rescale_factor.
Args:
image (`np.ndarray`):
Image to rescale.
rescale_factor (`float`):
The value to use for rescaling.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. If unset, is inferred from the input image. Can be
one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
'''
pass
def convert_segmentation_map_to_binary_masks(self, segmentation_map: 'np.ndarray', instance_id_to_semantic_id: Optional[dict[int, int]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False):
pass
def __call__(self, images, task_inputs=None, segmentation_maps=None, **kwargs) -> BatchFeature:
pass
def _preprocess(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
pass
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single image.'''
pass
def _preprocess_mask(self, segmentation_map: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single mask.'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, task_inputs: Optional[list[str]]=None, segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[dict[int, int]]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
pass
def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Pad an image with zeros to the given size.
'''
pass
def pad(self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
'''
Pads a batch of images to the bottom and right of the image with zeros to the size of the largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
image (`np.ndarray`):
Image to pad.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def get_semantic_annotations(self, label, num_class_obj):
pass
def get_instance_annotations(self, label, num_class_obj):
pass
def get_panoptic_annotations(self, label, num_class_obj):
pass
def encode_inputs(self, pixel_values_list: list[ImageInput], task_inputs: list[str], segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Pad images up to the largest image in a batch and create a corresponding `pixel_mask`.
OneFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps
will be converted to lists of binary masks and their respective labels. Let's see an example, assuming
`segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels =
[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for
each mask.
Args:
pixel_values_list (`list[ImageInput]`):
List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height,
width)`.
task_inputs (`list[str]`):
List of task values.
segmentation_maps (`ImageInput`, *optional*):
The corresponding semantic segmentation maps with the pixel-wise annotations.
(`bool`, *optional*, defaults to `True`):
Whether or not to pad images up to the largest image in a batch and create a pixel mask.
If left to the default, will return a pixel mask that is:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*):
A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an
instance segmentation map where each pixel represents an instance id. Can be provided as a single
dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map
instance ids in each image separately.
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`
objects.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input
image.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model.
- **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in
`self.model_input_names`).
- **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model
(when `annotations` are provided).
- **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when
`annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of
`mask_labels[i][j]` is `class_labels[i][j]`.
- **text_inputs** -- Optional list of text string entries to be fed to a model (when `annotations` are
provided). They identify the binary masks present in the image.
'''
pass
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple[int, int]]]=None) -> 'torch.Tensor':
'''
Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports
PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentation`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
Returns:
`list[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
'''
pass
def post_process_instance_segmentation(self, outputs, task_type: str='instance', is_demo: bool=True, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, target_sizes: Optional[list[tuple[int, int]]]=None, return_coco_annotation: Optional[bool]=False):
'''
Converts the output of [`OneFormerForUniversalSegmentationOutput`] into image instance segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`OneFormerForUniversalSegmentationOutput`]):
The outputs from [`OneFormerForUniversalSegmentationOutput`].
task_type (`str`, *optional*, defaults to "instance"):
The post processing depends on the task token input. If the `task_type` is "panoptic", we need to
ignore the stuff predictions.
is_demo (`bool`, *optional*, defaults to `True`):
Whether the model is in demo mode. If true, use threshold to predict final masks.
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
return_coco_annotation (`bool`, *optional*, defaults to `False`):
Whether to return predictions in COCO format.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
to `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
'''
pass
def post_process_panoptic_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[set[int]]=None, target_sizes: Optional[list[tuple[int, int]]]=None) -> list[dict]:
'''
Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation
predictions. Only supports PyTorch.
Args:
outputs ([`MaskFormerForInstanceSegmentationOutput`]):
The outputs from [`MaskFormerForInstanceSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
The labels in this set will have all their instances fused together. For instance, we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested
final size (height, width) of each prediction in batch. If left to None, predictions will not be
resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set
to `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized
to the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
'''
pass
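# A hedged usage sketch mirroring the one above (not part of the original file;
# the label id used for `label_ids_to_fuse` is a made-up placeholder):
#
#   results = processor.post_process_panoptic_segmentation(
#       outputs,
#       label_ids_to_fuse={2},  # e.g. fuse every instance of one "stuff" class
#       target_sizes=[(image.height, image.width)],
#   )
#   panoptic_map = results[0]["segmentation"]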
| 23
| 12
| 45
| 4
| 30
| 11
| 5
| 0.45
| 1
| 16
| 3
| 0
| 19
| 16
| 20
| 40
| 979
| 111
| 603
| 280
| 443
| 273
| 332
| 144
| 311
| 17
| 3
| 4
| 105
|
4,269
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerAttention
|
from torch import Tensor, nn
from typing import Optional, Union
import torch
class OneFormerAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and
keys (as explained in the DETR paper).
"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, key_value_states: Optional[torch.Tensor]=None, key_value_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
hidden_states = hidden_states.permute(1, 0, 2) if hidden_states is not None else None
position_embeddings = position_embeddings.permute(1, 0, 2) if position_embeddings is not None else None
key_value_states = key_value_states.permute(1, 0, 2) if key_value_states is not None else None
key_value_position_embeddings = key_value_position_embeddings.permute(1, 0, 2) if key_value_position_embeddings is not None else None
is_cross_attention = key_value_states is not None
batch_size, target_len, embed_dim = hidden_states.size()
if position_embeddings is not None:
hidden_states_original = hidden_states
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
if key_value_position_embeddings is not None:
key_value_states_original = key_value_states
key_value_states = self.with_pos_embed(key_value_states, key_value_position_embeddings)
query_states = self.q_proj(hidden_states) * self.scaling
if is_cross_attention:
key_states = self._shape(self.k_proj(key_value_states), -1, batch_size)
value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size)
else:
key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
source_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(f'Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is {attn_weights.size()}')
if attention_mask is not None:
if attention_mask.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(f'Attention mask should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is {attention_mask.size()}')
attn_weights += attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(batch_size * self.num_heads, target_len, self.head_dim)}, but is {attn_output.size()}')
attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
attn_output = self.out_proj(attn_output).permute(1, 0, 2)
return (attn_output, attn_weights_reshaped)
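# A minimal smoke test of the module above (a sketch, not part of the original
# file). The entry permutes mean inputs are sequence-first, (target_len, batch,
# embed_dim), and the output is permuted back to the same layout.
def _attention_smoke_test():
    attention = OneFormerAttention(embed_dim=256, num_heads=8)
    hidden = torch.randn(100, 2, 256)  # (target_len, batch, embed_dim)
    position = torch.randn(100, 2, 256)  # position embeddings, same layout
    output, weights = attention(hidden, position_embeddings=position, output_attentions=True)
    assert output.shape == (100, 2, 256)  # sequence-first, like the input
    assert weights.shape == (2, 8, 100, 100)  # (batch, heads, target_len, source_len)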
|
class OneFormerAttention(nn.Module):
'''
Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and
keys (as explained in the DETR paper).
'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
pass
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_embeddings: Optional[torch.Tensor]=None, key_value_states: Optional[torch.Tensor]=None, key_value_position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Time x Batch x Channel (permuted to batch-first internally)'''
pass
| 5
| 2
| 31
| 5
| 23
| 3
| 5
| 0.18
| 1
| 6
| 0
| 0
| 4
| 9
| 4
| 14
| 134
| 23
| 94
| 42
| 74
| 17
| 63
| 27
| 58
| 13
| 1
| 2
| 18
|
4,270
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerForUniversalSegmentation
|
from torch import Tensor, nn
from .configuration_oneformer import OneFormerConfig
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
@auto_docstring(custom_intro='\n OneFormer Model for instance, semantic and panoptic image segmentation.\n ')
class OneFormerForUniversalSegmentation(OneFormerPreTrainedModel):
main_input_name = ['pixel_values', 'task_inputs']
def __init__(self, config: OneFormerConfig):
super().__init__(config)
self.model = OneFormerModel(config)
self.matcher = OneFormerHungarianMatcher(cost_class=config.class_weight, cost_dice=config.dice_weight, cost_mask=config.mask_weight, num_points=config.train_num_points)
self.weight_dict: dict[str, float] = {'loss_cross_entropy': config.class_weight, 'loss_mask': config.mask_weight, 'loss_dice': config.dice_weight, 'loss_contrastive': config.contrastive_weight}
self.criterion = OneFormerLoss(num_classes=config.num_labels, matcher=self.matcher, weight_dict=self.weight_dict, eos_coef=config.no_object_weight, num_points=config.train_num_points, oversample_ratio=config.oversample_ratio, importance_sample_ratio=config.importance_sample_ratio, contrastive_temperature=config.contrastive_temperature)
self.post_init()
def get_loss_dict(self, masks_queries_logits: Tensor, class_queries_logits: Tensor, contrastive_queries_logits: Tensor, mask_labels: Tensor, class_labels: Tensor, text_queries: Tensor, auxiliary_predictions: dict[str, Tensor], calculate_contrastive_loss: bool) -> dict[str, Tensor]:
loss_dict: dict[str, Tensor] = self.criterion(masks_queries_logits=masks_queries_logits, class_queries_logits=class_queries_logits, contrastive_queries_logits=contrastive_queries_logits, mask_labels=mask_labels, class_labels=class_labels, text_queries=text_queries, auxiliary_predictions=auxiliary_predictions, calculate_contrastive_loss=calculate_contrastive_loss)
for key, weight in self.weight_dict.items():
for loss_key, loss in loss_dict.items():
if key in loss_key:
loss *= weight
return loss_dict
def get_loss(self, loss_dict: dict[str, Tensor]) -> Tensor:
return sum(loss_dict.values())
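# For example, with weight_dict = {"loss_mask": config.mask_weight, ...},
# get_loss_dict scales each matching entry in place (`loss *= weight` mutates the
# tensor stored in the dict, including auxiliary `loss_*_{idx}` entries), and
# get_loss then reduces the dict to the scalar training objective.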
@auto_docstring
def forward(self, pixel_values: Tensor, task_inputs: Tensor, text_inputs: Optional[Tensor]=None, mask_labels: Optional[list[Tensor]]=None, class_labels: Optional[list[Tensor]]=None, pixel_mask: Optional[Tensor]=None, output_auxiliary_logits: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> OneFormerForUniversalSegmentationOutput:
"""
task_inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Task inputs. Task inputs can be obtained using [`AutoImageProcessor`]. See [`OneFormerProcessor.__call__`]
for details.
text_inputs (`list[torch.Tensor]`, *optional*):
Tensor of shape `(num_queries, sequence_length)` to be fed to a model
mask_labels (`list[torch.Tensor]`, *optional*):
List of mask labels of shape `(num_labels, height, width)` to be fed to a model
class_labels (`list[torch.LongTensor]`, *optional*):
list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the
labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` is `class_labels[i][j]`.
output_auxiliary_logits (`bool`, *optional*):
Whether or not to output auxiliary logits.
Example:
Universal segmentation example:
```python
>>> from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
>>> from PIL import Image
>>> import requests
>>> import torch
>>> # load OneFormer fine-tuned on ADE20k for universal segmentation
>>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> url = (
... "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
... )
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> # Semantic Segmentation
>>> inputs = processor(image, ["semantic"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to processor for semantic postprocessing
>>> predicted_semantic_map = processor.post_process_semantic_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]
>>> f"👉 Semantic Predictions Shape: {list(predicted_semantic_map.shape)}"
'👉 Semantic Predictions Shape: [512, 683]'
>>> # Instance Segmentation
>>> inputs = processor(image, ["instance"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to processor for instance postprocessing
>>> predicted_instance_map = processor.post_process_instance_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]["segmentation"]
>>> f"👉 Instance Predictions Shape: {list(predicted_instance_map.shape)}"
'👉 Instance Predictions Shape: [512, 683]'
>>> # Panoptic Segmentation
>>> inputs = processor(image, ["panoptic"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to processor for panoptic postprocessing
>>> predicted_panoptic_map = processor.post_process_panoptic_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]["segmentation"]
>>> f"👉 Panoptic Predictions Shape: {list(predicted_panoptic_map.shape)}"
'👉 Panoptic Predictions Shape: [512, 683]'
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(pixel_values=pixel_values, task_inputs=task_inputs, text_inputs=text_inputs, pixel_mask=pixel_mask, output_hidden_states=output_hidden_states or self.config.use_auxiliary_loss, output_attentions=output_attentions, return_dict=True)
loss, loss_dict, auxiliary_predictions = (None, None, None)
class_queries_logits = outputs.transformer_decoder_class_predictions
masks_queries_logits = outputs.transformer_decoder_mask_predictions
contrastive_queries_logits = outputs.transformer_decoder_contrastive_queries
auxiliary_predictions = outputs.transformer_decoder_auxiliary_predictions
text_queries = outputs.text_queries
if mask_labels is not None and class_labels is not None:
loss_dict: dict[str, Tensor] = self.get_loss_dict(masks_queries_logits=masks_queries_logits, class_queries_logits=class_queries_logits, contrastive_queries_logits=contrastive_queries_logits, mask_labels=mask_labels, class_labels=class_labels, text_queries=text_queries, auxiliary_predictions=auxiliary_predictions, calculate_contrastive_loss=self.config.contrastive_temperature is not None)
loss = self.get_loss(loss_dict)
output_auxiliary_logits = self.config.output_auxiliary_logits if output_auxiliary_logits is None else output_auxiliary_logits
if not output_auxiliary_logits:
auxiliary_predictions = None
output = OneFormerForUniversalSegmentationOutput(class_queries_logits=class_queries_logits, masks_queries_logits=masks_queries_logits, auxiliary_predictions=auxiliary_predictions, loss=loss, **outputs)
if not return_dict:
output = tuple(v for v in output.values())
if loss is not None:
    output = (loss,) + output
return output
|
@auto_docstring(custom_intro='\n OneFormer Model for instance, semantic and panoptic image segmentation.\n ')
class OneFormerForUniversalSegmentation(OneFormerPreTrainedModel):
def __init__(self, config: OneFormerConfig):
pass
def get_loss_dict(self, masks_queries_logits: Tensor, class_queries_logits: Tensor, contrastive_queries_logits: Tensor, mask_labels: Tensor, class_labels: Tensor, text_queries: Tensor, auxiliary_predictions: dict[str, Tensor], calculate_contrastive_loss: bool) -> dict[str, Tensor]:
pass
def get_loss(self, loss_dict: dict[str, Tensor]) -> Tensor:
pass
@auto_docstring
def forward(self, pixel_values: Tensor, task_inputs: Tensor, text_inputs: Optional[Tensor]=None, mask_labels: Optional[list[Tensor]]=None, class_labels: Optional[list[Tensor]]=None, pixel_mask: Optional[Tensor]=None, output_auxiliary_logits: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> OneFormerForUniversalSegmentationOutput:
'''
task_inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Task inputs. Task inputs can be obtained using [`AutoImageProcessor`]. See [`OneFormerProcessor.__call__`]
for details.
text_inputs (`list[torch.Tensor]`, *optional*):
Tensor of shape `(num_queries, sequence_length)` to be fed to a model
mask_labels (`list[torch.Tensor]`, *optional*):
List of mask labels of shape `(num_labels, height, width)` to be fed to a model
class_labels (`list[torch.LongTensor]`, *optional*):
list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the
labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` is `class_labels[i][j]`.
output_auxiliary_logits (`bool`, *optional*):
Whether or not to output auxiliary logits.
Example:
Universal segmentation example:
```python
>>> from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
>>> from PIL import Image
>>> import requests
>>> import torch
>>> # load OneFormer fine-tuned on ADE20k for universal segmentation
>>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> url = (
... "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
... )
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> # Semantic Segmentation
>>> inputs = processor(image, ["semantic"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to processor for semantic postprocessing
>>> predicted_semantic_map = processor.post_process_semantic_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]
>>> f"👉 Semantic Predictions Shape: {list(predicted_semantic_map.shape)}"
'👉 Semantic Predictions Shape: [512, 683]'
>>> # Instance Segmentation
>>> inputs = processor(image, ["instance"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to processor for instance postprocessing
>>> predicted_instance_map = processor.post_process_instance_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]["segmentation"]
>>> f"👉 Instance Predictions Shape: {list(predicted_instance_map.shape)}"
'👉 Instance Predictions Shape: [512, 683]'
>>> # Panoptic Segmentation
>>> inputs = processor(image, ["panoptic"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # model predicts class_queries_logits of shape `(batch_size, num_queries)`
>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`
>>> class_queries_logits = outputs.class_queries_logits
>>> masks_queries_logits = outputs.masks_queries_logits
>>> # you can pass them to processor for panoptic postprocessing
>>> predicted_panoptic_map = processor.post_process_panoptic_segmentation(
... outputs, target_sizes=[(image.height, image.width)]
... )[0]["segmentation"]
>>> f"👉 Panoptic Predictions Shape: {list(predicted_panoptic_map.shape)}"
'👉 Panoptic Predictions Shape: [512, 683]'
```
'''
pass
| 7
| 1
| 53
| 7
| 29
| 17
| 4
| 0.57
| 1
| 11
| 5
| 0
| 4
| 4
| 4
| 5
| 221
| 32
| 120
| 43
| 91
| 69
| 41
| 20
| 36
| 9
| 2
| 3
| 15
|
4,271
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerForUniversalSegmentationOutput
|
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
import torch
from typing import Optional, Union
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`OneFormerForUniversalSegmentation`].\n\n This output can be directly passed to [`~OneFormerImageProcessor.post_process_semantic_segmentation`] or\n [`~OneFormerImageProcessor.post_process_instance_segmentation`] or\n [`~OneFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. Please see\n [`~OneFormerImageProcessor`] for details regarding usage.\n ')
class OneFormerForUniversalSegmentationOutput(ModelOutput):
"""
loss (`torch.Tensor`, *optional*):
The computed loss, returned when labels are present.
class_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each
query. Note the `+ 1` is needed because we incorporate the null class.
masks_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each
query.
auxiliary_predictions (List of Dict of `str, torch.FloatTensor`, *optional*):
List of class and mask predictions from each layer of the transformer decoder.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage.
transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
Output object queries from the last layer in the transformer decoder.
transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
Contrastive queries from the transformer decoder.
transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`):
Mask Predictions from the last layer in the transformer decoder.
transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`):
Class Predictions from the last layer in the transformer decoder.
transformer_decoder_auxiliary_predictions (List of Dict of `str, torch.FloatTensor`, *optional*):
List of class and mask predictions from each layer of the transformer decoder.
text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`):
Text queries derived from the input text list used for calculating contrastive loss during training.
task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`):
1D task token to condition the queries.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Self and Cross Attentions weights from transformer decoder.
"""
loss: Optional[torch.FloatTensor] = None
class_queries_logits: Optional[torch.FloatTensor] = None
masks_queries_logits: Optional[torch.FloatTensor] = None
auxiliary_predictions: Optional[list[dict[str, torch.FloatTensor]]] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
pixel_decoder_hidden_states: Optional[list[torch.FloatTensor]] = None
transformer_decoder_hidden_states: Optional[torch.FloatTensor] = None
transformer_decoder_object_queries: Optional[torch.FloatTensor] = None
transformer_decoder_contrastive_queries: Optional[torch.FloatTensor] = None
transformer_decoder_mask_predictions: Optional[torch.FloatTensor] = None
transformer_decoder_class_predictions: Optional[torch.FloatTensor] = None
transformer_decoder_auxiliary_predictions: Optional[list[dict[str, torch.FloatTensor]]] = None
text_queries: Optional[torch.FloatTensor] = None
task_token: Optional[torch.FloatTensor] = None
attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`OneFormerForUniversalSegmentation`].\n\n This output can be directly passed to [`~OneFormerImageProcessor.post_process_semantic_segmentation`] or\n [`~OneFormerImageProcessor.post_process_instance_segmentation`] or\n [`~OneFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. Please see\n [`~OneFormerImageProcessor`] for details regarding usage.\n ')
class OneFormerForUniversalSegmentationOutput(ModelOutput):
'''
loss (`torch.Tensor`, *optional*):
The computed loss, returned when labels are present.
class_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each
query. Note the `+ 1` is needed because we incorporate the null class.
masks_queries_logits (`torch.FloatTensor`):
A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each
query.
auxiliary_predictions (List of Dict of `str, torch.FloatTensor`, *optional*):
List of class and mask predictions from each layer of the transformer decoder.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage.
transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
Output object queries from the last layer in the transformer decoder.
transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
Contrastive queries from the transformer decoder.
transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`):
Mask Predictions from the last layer in the transformer decoder.
transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`):
Class Predictions from the last layer in the transformer decoder.
transformer_decoder_auxiliary_predictions (List of Dict of `str, torch.FloatTensor`, *optional*):
List of class and mask predictions from each layer of the transformer decoder.
text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`):
Text queries derived from the input text list used for calculating contrastive loss during training.
task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`):
1D task token to condition the queries.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Self and Cross Attentions weights from transformer decoder.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 2.94
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 66
| 3
| 16
| 16
| 15
| 47
| 16
| 16
| 15
| 0
| 1
| 0
| 0
|
4,272
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerHungarianMatcher
|
from torch.cuda.amp import autocast
from torch import Tensor, nn
import numpy as np
import torch
from scipy.optimize import linear_sum_assignment
class OneFormerHungarianMatcher(nn.Module):
def __init__(self, cost_class: float=1.0, cost_mask: float=1.0, cost_dice: float=1.0, num_points: int=12544):
"""This class computes an assignment between the labels and the predictions of the network.
For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more
predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are
un-matched (and thus treated as non-objects).
Params:
cost_class (float, *optional*, defaults to 1.0):
This is the relative weight of the classification error in the matching cost.
cost_mask (float, *optional*, defaults to 1.0):
This is the relative weight of the sigmoid ce loss of the binary mask in the matching cost.
cost_dice (float, *optional*, defaults to 1.0):
This is the relative weight of the dice loss of the binary mask in the matching cost.
num_points (int, *optional*, defaults to 12544):
Number of points to be sampled for dice and mask loss matching cost.
"""
super().__init__()
if cost_class == 0 and cost_mask == 0 and cost_dice == 0:
raise ValueError("All costs can't be 0")
self.cost_class = cost_class
self.cost_mask = cost_mask
self.cost_dice = cost_dice
self.num_points = num_points
@torch.no_grad()
def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> list[tuple[Tensor]]:
"""Performs the matching
Params:
masks_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, height, width` with the
predicted masks.
class_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, num_labels` with the
classification logits.
class_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes` (where num_target_boxes is the number
of ground-truth objects in the target) containing the class labels.
mask_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes, height, width` containing the target
masks.
Returns:
`list[tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected labels (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_targets).
"""
indices: list[tuple[np.array]] = []
num_queries = class_queries_logits.shape[1]
preds_masks = masks_queries_logits
preds_probs = class_queries_logits
for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels):
pred_probs = pred_probs.softmax(-1)
cost_class = -pred_probs[:, labels]
pred_mask = pred_mask[:, None]
target_mask = target_mask[:, None].to(pred_mask.device)
point_coords = torch.rand(1, self.num_points, 2, device=pred_mask.device)
target_mask = sample_point(target_mask, point_coords.repeat(target_mask.shape[0], 1, 1), align_corners=False).squeeze(1)
pred_mask = sample_point(pred_mask, point_coords.repeat(pred_mask.shape[0], 1, 1), align_corners=False).squeeze(1)
with autocast(enabled=False):
pred_mask = pred_mask.float()
target_mask = target_mask.float()
cost_mask = pair_wise_sigmoid_cross_entropy_loss(pred_mask, target_mask)
cost_dice = pair_wise_dice_loss(pred_mask, target_mask)
cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice
cost_matrix = cost_matrix.reshape(num_queries, -1).cpu()
assigned_indices: tuple[np.array] = linear_sum_assignment(cost_matrix)
indices.append(assigned_indices)
matched_indices = [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
return matched_indices
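# A toy illustration of the assignment step above (a sketch, not part of the
# original file): the matcher reduces class, mask and dice costs to a single
# (num_queries, num_targets) matrix and lets scipy pick the cheapest matching.
def _matcher_toy_example():
    cost_matrix = np.array([[0.9, 0.1],
                            [0.4, 0.8],
                            [0.2, 0.6]])  # (num_queries=3, num_targets=2)
    query_idx, target_idx = linear_sum_assignment(cost_matrix)
    # query_idx == [0, 2], target_idx == [1, 0]: queries 0 and 2 are matched to
    # targets 1 and 0 (total cost 0.3); query 1 is left unmatched ("no object").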
|
class OneFormerHungarianMatcher(nn.Module):
def __init__(self, cost_class: float=1.0, cost_mask: float=1.0, cost_dice: float=1.0, num_points: int=12544):
'''This class computes an assignment between the labels and the predictions of the network.
For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more
predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are
un-matched (and thus treated as non-objects).
Params:
cost_class (float, *optional*, defaults to 1.0):
This is the relative weight of the classification error in the matching cost.
cost_mask (float, *optional*, defaults to 1.0):
This is the relative weight of the sigmoid ce loss of the binary mask in the matching cost.
cost_dice (float, *optional*, defaults to 1.0):
This is the relative weight of the dice loss of the binary mask in the matching cost.
num_points (int, *optional*, defaults to 12544):
Number of points to be sampled for dice and mask loss matching cost.
'''
pass
@torch.no_grad()
def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> list[tuple[Tensor]]:
'''Performs the matching
Params:
masks_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, height, width` with the
predicted masks.
class_queries_logits (`torch.Tensor`):
A tensor of dim `batch_size, num_queries, num_labels` with the
classification logits.
class_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes` (where num_target_boxes is the number
of ground-truth objects in the target) containing the class labels.
mask_labels (`torch.Tensor`):
A tensor of dim `num_target_boxes, height, width` containing the target
masks.
Returns:
`list[tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected labels (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_targets).
'''
pass
| 4
| 2
| 52
| 7
| 22
| 23
| 2
| 1
| 1
| 7
| 0
| 0
| 2
| 4
| 2
| 12
| 107
| 15
| 46
| 22
| 40
| 46
| 33
| 19
| 30
| 2
| 1
| 2
| 4
|
4,273
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerLoss
|
import torch
from torch import Tensor, nn
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
import numpy as np
class OneFormerLoss(nn.Module):
def __init__(self, num_classes: int, matcher: OneFormerHungarianMatcher, weight_dict: dict[str, float], eos_coef: float, num_points: int, oversample_ratio: float, importance_sample_ratio: float, contrastive_temperature: Optional[float]=None):
"""
This class computes the losses using the class predictions, mask predictions and the contrastive queries.
Oneformer calculates the classification CE loss on the class predictions. Mask predictions are used for
calculating the binary CE loss and dice loss. The contrastive queries are used for calculating the contrastive
loss.
Args:
num_classes (`int`):
The number of classes.
matcher (`OneFormerHungarianMatcher`):
A torch module that computes the assignments between the predictions and labels.
weight_dict (`dict[str, float]`):
A dictionary of weights to be applied to the different losses.
eos_coef (`float`):
Weight to apply to the null class.
num_points (`int`):
Number of points to be sampled for dice and mask loss calculations.
oversample_ratio (`float`):
Required for pointwise loss calculation.
importance_sample_ratio (`float`):
Required for pointwise loss calculation.
contrastive_temperature (`float`):
Temperature for scaling the contrastive logits.
"""
requires_backends(self, ['scipy'])
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
self.num_points = num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
self.contrastive_temperature = contrastive_temperature
if self.contrastive_temperature is not None:
self.logit_scale = nn.Parameter(torch.tensor(np.log(1 / contrastive_temperature)))
def _max_by_axis(self, the_list: list[list[int]]) -> list[int]:
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
def _pad_images_to_max_in_batch(self, tensors: list[Tensor]) -> tuple[Tensor, Tensor]:
max_size = self._max_by_axis([list(tensor.shape) for tensor in tensors])
batch_size = len(tensors)
batch_shape = [batch_size] + max_size
b, _, h, w = batch_shape
dtype = tensors[0].dtype
device = tensors[0].device
padded_tensors = torch.zeros(batch_shape, dtype=dtype, device=device)
padding_masks = torch.ones((b, h, w), dtype=torch.bool, device=device)
for tensor, padded_tensor, padding_mask in zip(tensors, padded_tensors, padding_masks):
padded_tensor[:tensor.shape[0], :tensor.shape[1], :tensor.shape[2]].copy_(tensor)
padding_mask[:tensor.shape[1], :tensor.shape[2]] = False
return (padded_tensors, padding_masks)
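# For example, masks of shapes (1, 2, 3) and (1, 4, 2) are padded into one
# (2, 1, 4, 3) tensor, with padding_masks True exactly over the padded region.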
def loss_contrastive(self, contrastive_queries_logits: Tensor, text_queries: Tensor):
"""Compute the query-text contrastive loss.
Args:
contrastive_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
text_queries (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_contrastive** -- The query-text contrastive loss computed using task-guided queries
and text queries derived from input text list.
"""
image_queries = contrastive_queries_logits.float()
image_queries = nn.functional.normalize(image_queries.flatten(1), dim=-1)
text_queries = nn.functional.normalize(text_queries.flatten(1), dim=-1)
logit_scale = torch.clamp(self.logit_scale.exp(), max=100)
logits_per_text = torch.matmul(text_queries, image_queries.t()) * logit_scale
logits_per_img = logits_per_text.t()
loss_img = nn.functional.cross_entropy(logits_per_img, torch.arange(len(logits_per_img), device=logits_per_text.device))
loss_text = nn.functional.cross_entropy(logits_per_text, torch.arange(len(logits_per_text), device=logits_per_text.device))
loss_contrastive = loss_img + loss_text
losses = {'loss_contrastive': loss_contrastive}
return losses
def loss_labels(self, class_queries_logits: Tensor, class_labels: list[Tensor], indices: tuple[np.array]) -> dict[str, Tensor]:
"""Compute the losses related to the labels using cross entropy.
Args:
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
indices (`tuple[np.array])`:
The indices computed by the Hungarian matcher.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
"""
pred_logits = class_queries_logits
batch_size, num_queries, _ = pred_logits.shape
criterion = nn.CrossEntropyLoss(weight=self.empty_weight)
idx = self._get_predictions_permutation_indices(indices)
target_classes_o = torch.cat([target[j] for target, (_, j) in zip(class_labels, indices)])
target_classes = torch.full((batch_size, num_queries), fill_value=self.num_classes, dtype=torch.int64, device=pred_logits.device)
target_classes[idx] = target_classes_o
pred_logits_transposed = pred_logits.transpose(1, 2)
loss_ce = criterion(pred_logits_transposed, target_classes)
losses = {'loss_cross_entropy': loss_ce}
return losses
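# For example, with num_queries=3 and a single match (query 1 -> class 7),
# target_classes starts as [[C, C, C]] (C = self.num_classes, the "no object"
# index weighted by eos_coef) and becomes [[C, 7, C]] before the CE loss.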
def loss_masks(self, masks_queries_logits: Tensor, mask_labels: list[Tensor], indices: tuple[np.array], num_masks: int) -> dict[str, Tensor]:
"""Compute the losses related to the masks using focal and dice loss.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, height, width`
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
indices (`tuple[np.array])`:
The indices computed by the Hungarian matcher.
num_masks (`int)`:
The number of masks, used for normalization.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys:
- **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth
masks.
"""
src_idx = self._get_predictions_permutation_indices(indices)
tgt_idx = self._get_targets_permutation_indices(indices)
pred_masks = masks_queries_logits[src_idx]
target_masks, _ = self._pad_images_to_max_in_batch(mask_labels)
target_masks = target_masks[tgt_idx]
pred_masks = pred_masks[:, None]
target_masks = target_masks[:, None]
with torch.no_grad():
point_coords = self.sample_points_using_uncertainty(pred_masks, self.calculate_uncertainty, self.num_points, self.oversample_ratio, self.importance_sample_ratio)
point_labels = sample_point(target_masks, point_coords, align_corners=False).squeeze(1)
point_logits = sample_point(pred_masks, point_coords, align_corners=False).squeeze(1)
losses = {'loss_mask': sigmoid_cross_entropy_loss(point_logits, point_labels, num_masks), 'loss_dice': dice_loss(point_logits, point_labels, num_masks)}
del pred_masks
del target_masks
return losses
def calculate_uncertainty(self, logits: torch.Tensor) -> torch.Tensor:
"""
In Mask2Former paper, uncertainty is estimated as L1 distance between 0.0 and the logit prediction in 'logits'
for the foreground class in `classes`.
Args:
logits (`torch.Tensor`):
A tensor of shape (R, 1, ...) for class-specific or class-agnostic, where R is the total number of
predicted masks in all images and C is the number of foreground classes. The values are logits.
Returns:
scores (`torch.Tensor`): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most
uncertain locations having the highest uncertainty score.
"""
uncertainty_scores = -torch.abs(logits)
return uncertainty_scores
def sample_points_using_uncertainty(self, logits: torch.Tensor, uncertainty_function, num_points: int, oversample_ratio: int, importance_sample_ratio: float) -> torch.Tensor:
"""
This function is meant for sampling points in [0, 1] * [0, 1] coordinate space based on their uncertainty. The
uncertainty is calculated for each point using the passed `uncertainty function` that takes points logit
prediction as input.
Args:
logits (`float`):
Logit predictions for P points.
uncertainty_function:
A function that takes logit predictions for P points and returns their uncertainties.
num_points (`int`):
The number of points P to sample.
oversample_ratio (`int`):
Oversampling parameter.
importance_sample_ratio (`float`):
Ratio of points that are sampled via importance sampling.
Returns:
point_coordinates (`torch.Tensor`):
Coordinates for P sampled points.
"""
num_boxes = logits.shape[0]
num_points_sampled = int(num_points * oversample_ratio)
point_coordinates = torch.rand(num_boxes, num_points_sampled, 2, device=logits.device)
point_logits = sample_point(logits, point_coordinates, align_corners=False)
point_uncertainties = uncertainty_function(point_logits)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_points_sampled * torch.arange(num_boxes, dtype=torch.long, device=logits.device)
idx += shift[:, None]
point_coordinates = point_coordinates.view(-1, 2)[idx.view(-1), :].view(num_boxes, num_uncertain_points, 2)
if num_random_points > 0:
point_coordinates = torch.cat([point_coordinates, torch.rand(num_boxes, num_random_points, 2, device=logits.device)], dim=1)
return point_coordinates
def _get_predictions_permutation_indices(self, indices):
batch_indices = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
predictions_indices = torch.cat([src for src, _ in indices])
return (batch_indices, predictions_indices)
def _get_targets_permutation_indices(self, indices):
batch_indices = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
target_indices = torch.cat([tgt for _, tgt in indices])
return (batch_indices, target_indices)
def forward(self, masks_queries_logits: Tensor, class_queries_logits: Tensor, contrastive_queries_logits: Tensor, mask_labels: list[Tensor], class_labels: list[Tensor], text_queries: Tensor, auxiliary_predictions: Optional[dict[str, Tensor]]=None, calculate_contrastive_loss: bool=True) -> dict[str, Tensor]:
"""
This performs the loss computation.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, height, width`
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
contrastive_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
text_queries (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
auxiliary_predictions (`dict[str, torch.Tensor]`, *optional*):
if `use_auxiliary_loss` was set to `true` in [`OneFormerConfig`], then it contains the logits from the
inner layers of the transformer decoder.
calculate_contrastive_loss (`bool`, *optional*, defaults to `True`):
Whether or not to calculate the contrastive loss.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing the following keys:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
- **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth
masks.
- **loss_contrastive** -- The query-text contrastive loss computed using object and text queries.
if `use_auxiliary_loss` was set to `true` in [`OneFormerConfig`], the dictionary contains additional losses
for each auxiliary predictions.
"""
indices = self.matcher(masks_queries_logits, class_queries_logits, mask_labels, class_labels)
num_masks = self.get_num_masks(class_labels, device=class_labels[0].device)
losses: dict[str, Tensor] = {**self.loss_masks(masks_queries_logits, mask_labels, indices, num_masks), **self.loss_labels(class_queries_logits, class_labels, indices)}
if calculate_contrastive_loss:
losses = {**losses, **self.loss_contrastive(contrastive_queries_logits, text_queries)}
if auxiliary_predictions is not None:
for idx, aux_outputs in enumerate(auxiliary_predictions):
masks_queries_logits = aux_outputs['masks_queries_logits']
class_queries_logits = aux_outputs['class_queries_logits']
loss_dict = self.forward(masks_queries_logits, class_queries_logits, None, mask_labels, class_labels, None, calculate_contrastive_loss=False)
loss_dict = {f'{key}_{idx}': value for key, value in loss_dict.items()}
losses.update(loss_dict)
return losses
def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor:
"""
Computes the average number of target masks across the batch, for normalization purposes.
"""
num_masks = sum([len(classes) for classes in class_labels])
num_masks = torch.as_tensor([num_masks], dtype=torch.float, device=device)
world_size = 1
if is_accelerate_available():
if PartialState._shared_state != {}:
num_masks = reduce(num_masks)
world_size = PartialState().num_processes
num_masks = torch.clamp(num_masks / world_size, min=1)
return num_masks
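# A standalone sketch of the uncertainty-driven point sampling used by
# loss_masks above (not part of the original file; grid_sample stands in for
# the module-level `sample_point` helper, and all sizes are toy values).
def _point_sampling_sketch():
    logits = torch.randn(4, 1, 64, 64)  # (num_masks, 1, height, width)
    num_points, oversample_ratio, importance_ratio = 16, 3.0, 0.75
    candidates = torch.rand(4, int(num_points * oversample_ratio), 2)
    point_logits = torch.nn.functional.grid_sample(
        logits, 2.0 * candidates.unsqueeze(2) - 1.0, align_corners=False
    ).squeeze(3)  # (num_masks, 1, num_candidates)
    uncertainty = -point_logits.abs()  # highest near the 0-logit boundary
    keep = int(importance_ratio * num_points)
    idx = uncertainty[:, 0, :].topk(keep, dim=1).indices
    chosen = torch.gather(candidates, 1, idx.unsqueeze(-1).expand(-1, -1, 2))
    rest = torch.rand(4, num_points - keep, 2)  # uniform fill-in points
    points = torch.cat([chosen, rest], dim=1)
    assert points.shape == (4, 16, 2)  # (num_masks, num_points, xy)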
|
class OneFormerLoss(nn.Module):
def __init__(self, num_classes: int, matcher: OneFormerHungarianMatcher, weight_dict: dict[str, float], eos_coef: float, num_points: int, oversample_ratio: float, importance_sample_ratio: float, contrastive_temperature: Optional[float]=None):
'''
This class computes the losses using the class predictions, mask predictions and the contrastive queries.
Oneformer calculates the classification CE loss on the class predictions. Mask predictions are used for
calculating the binary CE loss and dice loss. The contrastive queries are used for calculating the contrastive
loss.
Args:
num_classes (`int`):
The number of classes.
matcher (`OneFormerHungarianMatcher`):
A torch module that computes the assignments between the predictions and labels.
weight_dict (`dict[str, float]`):
A dictionary of weights to be applied to the different losses.
eos_coef (`float`):
Weight to apply to the null class.
num_points (`int`):
Number of points to be sampled for dice and mask loss calculations.
oversample_ratio (`float`):
Required for pointwise loss calculation.
importance_sample_ratio (`float`):
Required for pointwise loss calculation.
contrastive_temperature (`float`):
Temperature for scaling the contrastive logits.
'''
pass
def _max_by_axis(self, the_list: list[list[int]]) -> list[int]:
pass
def _pad_images_to_max_in_batch(self, tensors: list[Tensor]) -> tuple[Tensor, Tensor]:
pass
def loss_contrastive(self, contrastive_queries_logits: Tensor, text_queries: Tensor):
'''Compute the query-text contrastive loss.
Args:
contrastive_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
text_queries (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_contrastive** -- The query-text contrastive loss computed using task-guided queries
and text queries derived from input text list.
'''
pass
def loss_labels(self, class_queries_logits: Tensor, class_labels: list[Tensor], indices: tuple[np.array]) -> dict[str, Tensor]:
'''Compute the losses related to the labels using cross entropy.
Args:
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
indices (`tuple[np.array])`:
The indices computed by the Hungarian matcher.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing the following key:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
'''
pass
def loss_masks(self, masks_queries_logits: Tensor, mask_labels: list[Tensor], indices: tuple[np.array], num_masks: int) -> dict[str, Tensor]:
'''Compute the losses related to the masks using focal and dice loss.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, height, width`
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
indices (`tuple[np.array])`:
The indices computed by the Hungarian matcher.
num_masks (`int)`:
The number of masks, used for normalization.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys:
- **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks.
- **loss_dice** -- The loss computed using dice loss on the predicted and ground truth
masks.
'''
pass
def calculate_uncertainty(self, logits: torch.Tensor) -> torch.Tensor:
'''
In Mask2Former paper, uncertainty is estimated as L1 distance between 0.0 and the logit prediction in 'logits'
for the foreground class in `classes`.
Args:
logits (`torch.Tensor`):
A tensor of shape (R, 1, ...) for class-specific or class-agnostic, where R is the total number of
predicted masks in all images and C is the number of foreground classes. The values are logits.
Returns:
scores (`torch.Tensor`): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most
uncertain locations having the highest uncertainty score.
'''
pass
def sample_points_using_uncertainty(self, logits: torch.Tensor, uncertainty_function, num_points: int, oversample_ratio: int, importance_sample_ratio: float) -> torch.Tensor:
'''
This function is meant for sampling points in [0, 1] * [0, 1] coordinate space based on their uncertainty. The
uncertainty is calculated for each point using the passed `uncertainty function` that takes points logit
prediction as input.
Args:
logits (`float`):
Logit predictions for P points.
uncertainty_function:
A function that takes logit predictions for P points and returns their uncertainties.
num_points (`int`):
The number of points P to sample.
oversample_ratio (`int`):
Oversampling parameter.
importance_sample_ratio (`float`):
Ratio of points that are sampled via importance sampling.
Returns:
point_coordinates (`torch.Tensor`):
Coordinates for P sampled points.
'''
pass
def _get_predictions_permutation_indices(self, indices):
pass
def _get_targets_permutation_indices(self, indices):
pass
def forward(self, masks_queries_logits: Tensor, class_queries_logits: Tensor, contrastive_queries_logits: Tensor, mask_labels: list[Tensor], class_labels: list[Tensor], text_queries: Tensor, auxiliary_predictions: Optional[dict[str, Tensor]]=None, calculate_contrastive_loss: bool=True) -> dict[str, Tensor]:
'''
This performs the loss computation.
Args:
masks_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, height, width`
class_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, num_labels`
contrastive_queries_logits (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
mask_labels (`torch.Tensor`):
List of mask labels of shape `(labels, height, width)`.
class_labels (`list[torch.Tensor]`):
List of class labels of shape `(labels)`.
text_queries (`torch.Tensor`):
A tensor of shape `batch_size, num_queries, hidden_dim`
auxiliary_predictions (`dict[str, torch.Tensor]`, *optional*):
if `use_auxiliary_loss` was set to `true` in [`OneFormerConfig`], then it contains the logits from the
inner layers of the transformer decoder.
calculate_contrastive_loss (`bool`, *optional*, defaults to `True`):
Whether or not to calculate the contrastive loss.
Returns:
`dict[str, Tensor]`: A dict of `torch.Tensor` containing two keys:
- **loss_cross_entropy** -- The loss computed using cross entropy on the predicted and ground truth labels.
- **loss_mask** -- The loss computed using sigmoid ce loss on the predicted and ground truth masks.
- **loss_dice** -- The loss computed using dice loss on the predicted on the predicted and ground truth
masks.
- **loss_contrastive** -- The query-text contrastive loss computed using object and text queries.
if `use_auxiliary_loss` was set to `true` in [`OneFormerConfig`], the dictionary contains additional losses
for each auxiliary predictions.
'''
pass
def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor:
'''
Computes the average number of target masks across the batch, for normalization purposes.
'''
pass
| 13
| 8
| 31
| 3
| 15
| 12
| 2
| 0.81
| 1
| 11
| 1
| 0
| 12
| 9
| 12
| 22
| 385
| 48
| 186
| 112
| 142
| 151
| 126
| 81
| 113
| 4
| 1
| 2
| 22
|
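The docstrings in the loss skeleton above describe PointRend-style uncertainty estimation and point sampling. The snippet below is a minimal, self-contained sketch of that technique under the shapes stated in the docstrings; the function names mirror the skeleton, but the bodies are illustrative and not the file's exact implementation.

```python
import torch
from torch.nn import functional as F


def calculate_uncertainty(logits: torch.Tensor) -> torch.Tensor:
    # Highest uncertainty where the logit is closest to the 0.0 decision boundary.
    return -torch.abs(logits)


def sample_points_using_uncertainty(logits, uncertainty_function, num_points, oversample_ratio, importance_sample_ratio):
    # logits: (R, 1, H, W) class-agnostic mask logits
    num_masks = logits.shape[0]
    num_sampled = int(num_points * oversample_ratio)

    # 1) oversample candidate points uniformly in [0, 1] x [0, 1]
    coords = torch.rand(num_masks, num_sampled, 2, device=logits.device)
    # grid_sample expects a 4D grid with coordinates in [-1, 1]
    point_logits = F.grid_sample(logits, 2.0 * coords.unsqueeze(2) - 1.0, align_corners=False).squeeze(3)

    # 2) keep the most uncertain fraction of the candidates
    num_uncertain = int(importance_sample_ratio * num_points)
    idx = torch.topk(uncertainty_function(point_logits)[:, 0, :], k=num_uncertain, dim=1)[1]
    shift = num_sampled * torch.arange(num_masks, device=logits.device)
    coords = coords.view(-1, 2)[(idx + shift[:, None]).view(-1)].view(num_masks, num_uncertain, 2)

    # 3) pad with fresh uniform samples up to num_points
    num_random = num_points - num_uncertain
    if num_random > 0:
        coords = torch.cat([coords, torch.rand(num_masks, num_random, 2, device=logits.device)], dim=1)
    return coords
```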
4,274
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerMLPPredictionHead
|
from torch import Tensor, nn
class OneFormerMLPPredictionHead(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int=3):
"""
A classic Multi Layer Perceptron (MLP).
Args:
input_dim (`int`):
The input dimensions.
hidden_dim (`int`):
The hidden dimensions.
output_dim (`int`):
The output dimensions.
num_layers (int, *optional*, defaults to 3):
The number of layers.
"""
super().__init__()
in_dims = [input_dim] + [hidden_dim] * (num_layers - 1)
out_dims = [hidden_dim] * (num_layers - 1) + [output_dim]
layers = []
for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)):
layers.append(PredictionBlock(in_dim, out_dim, activation=nn.ReLU() if i < num_layers - 1 else nn.Identity()))
self.layers = nn.Sequential(*layers)
def forward(self, input: Tensor) -> Tensor:
return self.layers(input)
|
class OneFormerMLPPredictionHead(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int=3):
'''
A classic Multi Layer Perceptron (MLP).
Args:
input_dim (`int`):
The input dimensions.
hidden_dim (`int`):
The hidden dimensions.
output_dim (`int`):
The output dimensions.
num_layers (int, *optional*, defaults to 3):
The number of layers.
'''
pass
def forward(self, input: Tensor) -> Tensor:
pass
| 3
| 1
| 14
| 2
| 6
| 6
| 2
| 0.92
| 1
| 6
| 1
| 0
| 2
| 1
| 2
| 12
| 29
| 4
| 13
| 8
| 10
| 12
| 11
| 8
| 8
| 3
| 1
| 1
| 4
|
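For a quick sanity check of the MLP head above, here is an equivalent stand-alone wiring using plain `nn.Linear`/`nn.ReLU` blocks (the `PredictionBlock` used in the original is approximated by a `nn.Sequential`; the sizes are illustrative only):

```python
import torch
from torch import nn

# Reproduce the layer wiring of the MLP head for
# input_dim=77, hidden_dim=256, output_dim=256, num_layers=2 (illustrative sizes).
input_dim, hidden_dim, output_dim, num_layers = 77, 256, 256, 2
in_dims = [input_dim] + [hidden_dim] * (num_layers - 1)    # [77, 256]
out_dims = [hidden_dim] * (num_layers - 1) + [output_dim]  # [256, 256]

blocks = []
for i, (in_dim, out_dim) in enumerate(zip(in_dims, out_dims)):
    # ReLU on every block except the last one, as in the original head
    act = nn.ReLU() if i < num_layers - 1 else nn.Identity()
    blocks.append(nn.Sequential(nn.Linear(in_dim, out_dim), act))
mlp = nn.Sequential(*blocks)

print(mlp(torch.randn(4, 77)).shape)  # torch.Size([4, 256])
```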
4,275
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerModel
|
import torch
from torch import Tensor, nn
from typing import Optional, Union
from .configuration_oneformer import OneFormerConfig
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
@auto_docstring
class OneFormerModel(OneFormerPreTrainedModel):
main_input_name = ['pixel_values', 'task_inputs']
def __init__(self, config: OneFormerConfig):
super().__init__(config)
self.pixel_level_module = OneFormerPixelLevelModule(config)
self.transformer_module = OneFormerTransformerModule(in_features=config.conv_dim, config=config)
self.task_encoder = OneFormerTaskModel(config)
self.is_training = config.is_training
if self.is_training:
self.text_mapper = OneFormerTextMapper(config)
else:
self.text_mapper = None
self.post_init()
@auto_docstring
def forward(self, pixel_values: Tensor, task_inputs: Tensor, text_inputs: Optional[Tensor]=None, pixel_mask: Optional[Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> OneFormerModelOutput:
"""
task_inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Task inputs. Task inputs can be obtained using [`AutoImageProcessor`]. See [`OneFormerProcessor.__call__`]
for details.
text_inputs (`list[torch.Tensor]`, *optional*):
Tensor of shape `(num_queries, sequence_length)` to be fed to a model
Example:
```python
>>> import torch
>>> from PIL import Image
>>> import requests
>>> from transformers import OneFormerProcessor, OneFormerModel
>>> # download a test image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> # load processor for preprocessing the inputs
>>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> model = OneFormerModel.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> inputs = processor(image, ["semantic"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> mask_predictions = outputs.transformer_decoder_mask_predictions
>>> class_predictions = outputs.transformer_decoder_class_predictions
>>> f"👉 Mask Predictions Shape: {list(mask_predictions.shape)}, Class Predictions Shape: {list(class_predictions.shape)}"
'👉 Mask Predictions Shape: [1, 150, 128, 171], Class Predictions Shape: [1, 150, 151]'
```"""
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, _, height, width = pixel_values.shape
if pixel_mask is None:
pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device)
pixel_level_module_output = self.pixel_level_module(pixel_values, output_hidden_states)
multi_scale_features = pixel_level_module_output.decoder_features
mask_features = pixel_level_module_output.decoder_last_feature
task_token = self.task_encoder(task_inputs.to(self.dtype))
if self.is_training:
text_queries = self.text_mapper(text_inputs)
else:
text_queries = None
transformer_module_output = self.transformer_module(multi_scale_features=multi_scale_features, mask_features=mask_features, task_token=task_token, output_attentions=output_attentions)
queries = transformer_module_output.object_queries
encoder_hidden_states = None
pixel_decoder_hidden_states = None
transformer_decoder_hidden_states = None
if output_hidden_states:
encoder_hidden_states = pixel_level_module_output.encoder_features
pixel_decoder_hidden_states = (pixel_level_module_output.decoder_last_feature,)
for f in pixel_level_module_output.decoder_features:
pixel_decoder_hidden_states += (f,)
transformer_decoder_hidden_states = transformer_module_output.auxiliary_predictions
output = OneFormerModelOutput(encoder_hidden_states=encoder_hidden_states, pixel_decoder_hidden_states=pixel_decoder_hidden_states, transformer_decoder_hidden_states=transformer_decoder_hidden_states, transformer_decoder_object_queries=queries, transformer_decoder_contrastive_queries=transformer_module_output.contrastive_logits, transformer_decoder_mask_predictions=transformer_module_output.prediction_masks, transformer_decoder_class_predictions=transformer_module_output.prediction_class, transformer_decoder_auxiliary_predictions=transformer_module_output.auxiliary_predictions, text_queries=text_queries, task_token=task_token, attentions=transformer_module_output.attentions)
if not return_dict:
output = tuple((v for v in output.values()))
return output
|
@auto_docstring
class OneFormerModel(OneFormerPreTrainedModel):
def __init__(self, config: OneFormerConfig):
pass
@auto_docstring
def forward(self, pixel_values: Tensor, task_inputs: Tensor, text_inputs: Optional[Tensor]=None, pixel_mask: Optional[Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> OneFormerModelOutput:
'''
task_inputs (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Task inputs. Task inputs can be obtained using [`AutoImageProcessor`]. See [`OneFormerProcessor.__call__`]
for details.
text_inputs (`list[torch.Tensor]`, *optional*):
Tensor of shape `(num_queries, sequence_length)` to be fed to a model
Example:
```python
>>> import torch
>>> from PIL import Image
>>> import requests
>>> from transformers import OneFormerProcessor, OneFormerModel
>>> # download a test image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> # load processor for preprocessing the inputs
>>> processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> model = OneFormerModel.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
>>> inputs = processor(image, ["semantic"], return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> mask_predictions = outputs.transformer_decoder_mask_predictions
>>> class_predictions = outputs.transformer_decoder_class_predictions
>>> f"👉 Mask Predictions Shape: {list(mask_predictions.shape)}, Class Predictions Shape: {list(class_predictions.shape)}"
'👉 Mask Predictions Shape: [1, 150, 128, 171], Class Predictions Shape: [1, 150, 151]'
```'''
pass
| 5
| 1
| 59
| 12
| 36
| 12
| 6
| 0.31
| 1
| 11
| 6
| 0
| 2
| 5
| 2
| 3
| 123
| 25
| 75
| 32
| 61
| 23
| 43
| 22
| 40
| 10
| 2
| 2
| 12
|
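The doctest above stops at the raw mask and class logits. In practice these are combined into a per-pixel semantic map by the image processor; the snippet below is only a rough sketch of that combination step, with random tensors standing in for real model outputs:

```python
import torch

# masks: (batch, num_queries, height, width), classes: (batch, num_queries, num_labels + 1)
masks_queries_logits = torch.randn(1, 150, 128, 171)
class_queries_logits = torch.randn(1, 150, 151)

masks_probs = masks_queries_logits.sigmoid()
# drop the trailing "no object" class before combining
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
# weight each query's mask by its class scores and sum over queries
segmentation = torch.einsum("bqc,bqhw->bchw", masks_classes, masks_probs)
semantic_map = segmentation.argmax(dim=1)   # (batch, height, width)
print(semantic_map.shape)                   # torch.Size([1, 128, 171])
```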
4,276
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerModelOutput
|
import torch
from typing import Optional, Union
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`OneFormerModel`]. This class returns all the needed hidden states to compute the logits.\n ')
class OneFormerModelOutput(ModelOutput):
"""
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage.
transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
Output object queries from the last layer in the transformer decoder.
transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
Contrastive queries from the transformer decoder.
transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`):
Mask Predictions from the last layer in the transformer decoder.
transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`):
Class Predictions from the last layer in the transformer decoder.
transformer_decoder_auxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*):
Tuple of class and mask predictions from each layer of the transformer decoder.
text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`):
Text queries derived from the input text list used for calculating contrastive loss during training.
task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`):
1D task token to condition the queries.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Self and Cross Attentions weights from transformer decoder.
"""
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
pixel_decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
transformer_decoder_hidden_states: Optional[torch.FloatTensor] = None
transformer_decoder_object_queries: Optional[torch.FloatTensor] = None
transformer_decoder_contrastive_queries: Optional[torch.FloatTensor] = None
transformer_decoder_mask_predictions: Optional[torch.FloatTensor] = None
transformer_decoder_class_predictions: Optional[torch.FloatTensor] = None
transformer_decoder_auxiliary_predictions: Optional[tuple[dict[str, torch.FloatTensor]]] = None
text_queries: Optional[torch.FloatTensor] = None
task_token: Optional[torch.FloatTensor] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`OneFormerModel`]. This class returns all the needed hidden states to compute the logits.\n ')
class OneFormerModelOutput(ModelOutput):
'''
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage.
transformer_decoder_object_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
Output object queries from the last layer in the transformer decoder.
transformer_decoder_contrastive_queries (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_dim)`):
Contrastive queries from the transformer decoder.
transformer_decoder_mask_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, height, width)`):
Mask Predictions from the last layer in the transformer decoder.
transformer_decoder_class_predictions (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes+1)`):
Class Predictions from the last layer in the transformer decoder.
transformer_decoder_auxiliary_predictions (Tuple of Dict of `str, torch.FloatTensor`, *optional*):
Tuple of class and mask predictions from each layer of the transformer decoder.
text_queries (`torch.FloatTensor`, *optional* of shape `(batch_size, num_queries, hidden_dim)`):
Text queries derived from the input text list used for calculating contrastive loss during training.
task_token (`torch.FloatTensor` of shape `(batch_size, hidden_dim)`):
1D task token to condition the queries.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Self and Cross Attentions weights from transformer decoder.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 2.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 2
| 12
| 12
| 11
| 33
| 12
| 12
| 11
| 0
| 1
| 0
| 0
|
4,277
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerPixelDecoder
|
from torch import Tensor, nn
import torch
from .configuration_oneformer import OneFormerConfig
import numpy as np
class OneFormerPixelDecoder(nn.Module):
def __init__(self, config: OneFormerConfig, feature_channels):
super().__init__()
self.config = config
self.position_embedding = OneFormerSinePositionEmbedding(num_pos_feats=config.conv_dim // 2, normalize=True)
self.num_feature_levels = 3
transformer_in_channels = feature_channels[-self.num_feature_levels:]
self.transformer_feature_strides = config.strides[-self.num_feature_levels:]
self.feature_channels = feature_channels
self.level_embed = nn.Parameter(torch.Tensor(self.num_feature_levels, config.conv_dim))
if self.num_feature_levels > 1:
input_projections_list = []
for in_channels in transformer_in_channels[::-1]:
input_projections_list.append(nn.Sequential(nn.Conv2d(in_channels, config.conv_dim, kernel_size=1), nn.GroupNorm(32, config.conv_dim)))
self.input_projections = nn.ModuleList(input_projections_list)
else:
self.input_projections = nn.ModuleList([nn.Sequential(nn.Conv2d(transformer_in_channels[-1], config.conv_dim, kernel_size=1), nn.GroupNorm(32, config.conv_dim))])
self.encoder = OneFormerPixelDecoderEncoderOnly(config)
self.mask_projection = nn.Conv2d(config.conv_dim, config.mask_dim, kernel_size=1, stride=1, padding=0)
self.common_stride = config.common_stride
stride = min(self.transformer_feature_strides)
self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride))
lateral_convs = []
output_convs = []
for idx, in_channels in enumerate(self.feature_channels[:self.num_fpn_levels]):
lateral_conv = nn.Sequential(nn.Conv2d(in_channels, config.conv_dim, kernel_size=1, bias=False), nn.GroupNorm(32, config.conv_dim))
output_conv = nn.Sequential(nn.Conv2d(config.conv_dim, config.conv_dim, kernel_size=3, stride=1, padding=1, bias=False), nn.GroupNorm(32, config.conv_dim), nn.ReLU())
self.add_module(f'adapter_{idx + 1}', lateral_conv)
self.add_module(f'layer_{idx + 1}', output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
def get_valid_ratio(self, mask, dtype=torch.float32):
"""Get the valid ratio of all feature maps."""
_, height, width = mask.shape
valid_height = torch.sum(~mask[:, :, 0], 1)
valid_width = torch.sum(~mask[:, 0, :], 1)
valid_ratio_height = valid_height.to(dtype) / height
valid_ratio_width = valid_width.to(dtype) / width
valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
return valid_ratio
def forward(self, features, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
sources = []
position_embeddings_list = []
for level, source in enumerate(features[::-1][:self.num_feature_levels]):
sources.append(self.input_projections[level](source))
position_embeddings_list.append(self.position_embedding(source.shape, source.device, source.dtype))
masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in sources]
source_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)):
batch_size, num_channels, height, width = source.shape
spatial_shape = (height, width)
spatial_shapes.append(spatial_shape)
source = source.flatten(2).transpose(1, 2)
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
source_flatten.append(source)
mask_flatten.append(mask)
source_flatten = torch.cat(source_flatten, 1)
mask_flatten = torch.cat(mask_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m, dtype=source_flatten.dtype) for m in masks], 1)
if encoder_outputs is None:
encoder_outputs = self.encoder(inputs_embeds=source_flatten, attention_mask=mask_flatten, position_embeddings=lvl_pos_embed_flatten, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
y = encoder_outputs.last_hidden_state
bs = y.shape[0]
split_size_or_sections = [None] * self.num_feature_levels
for i in range(self.num_feature_levels):
if i < self.num_feature_levels - 1:
split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i]
else:
split_size_or_sections[i] = y.shape[1] - level_start_index[i]
y = torch.split(y, split_size_or_sections, dim=1)
out = []
multi_scale_features = []
num_cur_levels = 0
for i, z in enumerate(y):
out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1]))
for idx, feats in enumerate(features[:self.num_fpn_levels][::-1]):
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
cur_fpn = lateral_conv(feats)
y = cur_fpn + nn.functional.interpolate(out[-1], size=cur_fpn.shape[-2:], mode='bilinear', align_corners=False)
y = output_conv(y)
out.append(y)
for o in out:
if num_cur_levels < self.num_feature_levels:
multi_scale_features.append(o)
num_cur_levels += 1
return OneFormerPixelDecoderOutput(mask_features=self.mask_projection(out[-1]), multi_scale_features=multi_scale_features, attentions=encoder_outputs.attentions)
|
class OneFormerPixelDecoder(nn.Module):
def __init__(self, config: OneFormerConfig, feature_channels):
pass
def get_valid_ratio(self, mask, dtype=torch.float32):
'''Get the valid ratio of all feature maps.'''
pass
def forward(self, features, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None):
pass
| 4
| 1
| 65
| 7
| 53
| 4
| 6
| 0.08
| 1
| 11
| 4
| 0
| 3
| 13
| 3
| 13
| 197
| 23
| 161
| 66
| 150
| 13
| 97
| 59
| 93
| 12
| 1
| 2
| 17
|
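A toy check of the valid-ratio computation performed by `get_valid_ratio` above (the mask marks padded positions with `True`; the sizes are illustrative):

```python
import torch

# Batch of 2 feature maps of size 4x6. The second sample is padded on the
# right half of every row and on the bottom row.
mask = torch.zeros(2, 4, 6, dtype=torch.bool)
mask[1, :, 3:] = True
mask[1, 3, :] = True

_, height, width = mask.shape
valid_height = torch.sum(~mask[:, :, 0], 1)   # valid rows, measured on the first column
valid_width = torch.sum(~mask[:, 0, :], 1)    # valid columns, measured on the first row
valid_ratio = torch.stack([valid_width / width, valid_height / height], -1)
print(valid_ratio)   # tensor([[1.0000, 1.0000], [0.5000, 0.7500]])
```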
4,278
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerPixelDecoderEncoderLayer
|
from .configuration_oneformer import OneFormerConfig
from torch import Tensor, nn
import torch
from typing import Optional, Union
class OneFormerPixelDecoderEncoderLayer(nn.Module):
def __init__(self, config: OneFormerConfig):
super().__init__()
self.embed_dim = config.conv_dim
self.self_attn = OneFormerPixelDecoderEncoderMultiscaleDeformableAttention(embed_dim=self.embed_dim, num_heads=config.num_attention_heads, n_levels=3, n_points=4)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.dropout = config.dropout
self.activation_fn = nn.functional.relu
self.activation_dropout = config.dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_feedforward_dim)
self.fc2 = nn.Linear(config.encoder_feedforward_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.is_training = config.is_training
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False):
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Input to the layer.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Attention mask.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings, to be added to `hidden_states`.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes (`torch.LongTensor`, *optional*):
Spatial shapes of the backbone feature maps.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.is_training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.is_training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.is_training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
class OneFormerPixelDecoderEncoderLayer(nn.Module):
def __init__(self, config: OneFormerConfig):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False):
'''
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Input to the layer.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Attention mask.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings, to be added to `hidden_states`.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes (`torch.LongTensor`, *optional*):
Spatial shapes of the backbone feature maps.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 44
| 6
| 29
| 10
| 3
| 0.33
| 1
| 5
| 2
| 0
| 2
| 10
| 2
| 12
| 89
| 12
| 58
| 26
| 46
| 19
| 34
| 17
| 31
| 4
| 1
| 2
| 5
|
4,279
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerPixelDecoderEncoderMultiscaleDeformableAttention
|
import warnings
from torch import Tensor, nn
import torch
from typing import Optional, Union
class OneFormerPixelDecoderEncoderMultiscaleDeformableAttention(nn.Module):
"""
Multiscale deformable attention as proposed in Deformable DETR.
"""
def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int):
super().__init__()
if embed_dim % num_heads != 0:
raise ValueError(f'embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}')
dim_per_head = embed_dim // num_heads
if not (dim_per_head & dim_per_head - 1 == 0 and dim_per_head != 0):
warnings.warn("You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the dimension of each attention head a power of 2 which is more efficient in the authors' CUDA implementation.")
self.im2col_step = 128
self.d_model = embed_dim
self.n_levels = n_levels
self.n_heads = num_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points)
self.value_proj = nn.Linear(embed_dim, embed_dim)
self.output_proj = nn.Linear(embed_dim, embed_dim)
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False):
if position_embeddings is not None:
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
batch_size, num_queries, _ = hidden_states.shape
batch_size, sequence_length, _ = encoder_hidden_states.shape
if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length:
raise ValueError('Make sure to align the spatial shapes with the sequence length of the encoder hidden states')
value = self.value_proj(encoder_hidden_states)
if attention_mask is not None:
value = value.masked_fill(attention_mask[..., None], float(0))
value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(hidden_states).view(batch_size, num_queries, self.n_heads, self.n_levels * self.n_points)
attention_weights = nn.functional.softmax(attention_weights, -1).view(batch_size, num_queries, self.n_heads, self.n_levels, self.n_points)
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif reference_points.shape[-1] == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(f'Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}')
output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights)
output = self.output_proj(output)
return (output, attention_weights)
|
class OneFormerPixelDecoderEncoderMultiscaleDeformableAttention(nn.Module):
'''
Multiscale deformable attention as proposed in Deformable DETR.
'''
def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int):
pass
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool=False):
pass
| 4
| 1
| 28
| 2
| 24
| 2
| 4
| 0.11
| 1
| 6
| 0
| 0
| 3
| 9
| 3
| 13
| 91
| 9
| 74
| 33
| 59
| 8
| 41
| 22
| 37
| 6
| 1
| 1
| 11
|
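The `forward` above hands the sampled locations to a `multi_scale_deformable_attention` helper that is defined elsewhere in the same file. A pure-PyTorch sketch of that kind of sampling (illustrative, not the file's exact implementation) looks like this:

```python
import torch
from torch import nn


def deformable_attention_core(value, spatial_shapes, sampling_locations, attention_weights):
    # value: (batch, sum(h*w), heads, head_dim); spatial_shapes: list of (h, w) ints
    # sampling_locations: (batch, queries, heads, levels, points, 2) in [0, 1]
    # attention_weights: (batch, queries, heads, levels, points), softmaxed over levels*points
    batch_size, _, num_heads, head_dim = value.shape
    _, num_queries, _, num_levels, num_points, _ = sampling_locations.shape
    value_list = value.split([h * w for h, w in spatial_shapes], dim=1)
    sampling_grids = 2 * sampling_locations - 1  # grid_sample expects [-1, 1]
    sampled = []
    for level, (h, w) in enumerate(spatial_shapes):
        # (batch*heads, head_dim, h, w)
        value_l = value_list[level].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, head_dim, h, w)
        # (batch*heads, queries, points, 2)
        grid_l = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
        sampled.append(nn.functional.grid_sample(value_l, grid_l, mode="bilinear", padding_mode="zeros", align_corners=False))
    # (batch*heads, 1, queries, levels*points)
    weights = attention_weights.transpose(1, 2).reshape(batch_size * num_heads, 1, num_queries, num_levels * num_points)
    output = (torch.stack(sampled, dim=-2).flatten(-2) * weights).sum(-1)
    return output.view(batch_size, num_heads * head_dim, num_queries).transpose(1, 2).contiguous()


# shape check with toy sizes: two levels of 8x8 and 4x4
shapes = [(8, 8), (4, 4)]
out = deformable_attention_core(
    torch.randn(2, 80, 4, 32), shapes, torch.rand(2, 10, 4, 2, 4, 2), torch.rand(2, 10, 4, 2, 4)
)
print(out.shape)  # torch.Size([2, 10, 128])
```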
4,280
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerPixelDecoderEncoderOnly
|
import torch
from ...modeling_outputs import BaseModelOutput
from .configuration_oneformer import OneFormerConfig
from torch import Tensor, nn
class OneFormerPixelDecoderEncoderOnly(nn.Module):
"""
Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a
[`OneFormerPixelDecoderEncoderLayer`].
The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers.
Args:
config: OneFormerConfig
"""
def __init__(self, config: OneFormerConfig):
super().__init__()
self.config = config
self.dropout = config.dropout
self.layers = nn.ModuleList([OneFormerPixelDecoderEncoderLayer(config) for _ in range(config.encoder_layers)])
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
"""
Get reference points for each feature map. Used in decoder.
Args:
spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Valid ratios of each feature map.
device (`torch.device`):
Device on which to create the tensors.
Returns:
`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`
"""
reference_points_list = []
for lvl, (height, width) in enumerate(spatial_shapes):
ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device), torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device))
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * height)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * width)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None):
"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of each feature map.
level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
Starting index of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Ratio of valid area in each feature level.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeds
reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(hidden_states, attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
|
class OneFormerPixelDecoderEncoderOnly(nn.Module):
'''
Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a
[`OneFormerPixelDecoderEncoderLayer`].
The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers.
Args:
config: OneFormerConfig
'''
def __init__(self, config: OneFormerConfig):
pass
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
'''
Get reference points for each feature map. Used in decoder.
Args:
spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Valid ratios of each feature map.
device (`torch.device`):
Device on which to create the tensors.
Returns:
`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`
'''
pass
def forward(self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None):
'''
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of each feature map.
level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
Starting index of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Ratio of valid area in each feature level.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 5
| 3
| 35
| 3
| 20
| 13
| 4
| 0.73
| 1
| 6
| 3
| 0
| 2
| 3
| 3
| 13
| 120
| 13
| 62
| 30
| 46
| 45
| 35
| 18
| 31
| 10
| 1
| 2
| 13
|
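For intuition, the reference points produced by `get_reference_points` above are simply the normalized cell centers of each feature level, scaled by the valid ratios. A single 2x3 level with fully valid maps (valid ratios of 1) gives:

```python
import torch

# Normalized (x, y) centers of each cell in a 2x3 feature map.
height, width = 2, 3
ref_y, ref_x = torch.meshgrid(
    torch.linspace(0.5, height - 0.5, height),
    torch.linspace(0.5, width - 0.5, width),
)
ref = torch.stack((ref_x.reshape(-1) / width, ref_y.reshape(-1) / height), -1)
print(ref)
# tensor([[0.1667, 0.2500], [0.5000, 0.2500], [0.8333, 0.2500],
#         [0.1667, 0.7500], [0.5000, 0.7500], [0.8333, 0.7500]])
```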
4,281
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerPixelDecoderFrozenBatchNorm2d
|
import torch
from torch import Tensor, nn
class OneFormerPixelDecoderFrozenBatchNorm2d(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which models other than
torchvision.models.resnet[18,34,50,101] produce nans.
"""
def __init__(self, n):
super().__init__()
self.register_buffer('weight', torch.ones(n))
self.register_buffer('bias', torch.zeros(n))
self.register_buffer('running_mean', torch.zeros(n))
self.register_buffer('running_var', torch.ones(n))
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
def forward(self, x):
weight = self.weight.reshape(1, -1, 1, 1)
bias = self.bias.reshape(1, -1, 1, 1)
running_var = self.running_var.reshape(1, -1, 1, 1)
running_mean = self.running_mean.reshape(1, -1, 1, 1)
epsilon = 1e-05
scale = weight * (running_var + epsilon).rsqrt()
bias = bias - running_mean * scale
return x * scale + bias
|
class OneFormerPixelDecoderFrozenBatchNorm2d(nn.Module):
'''
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which models other than
torchvision.models.resnet[18,34,50,101] produce nans.
'''
def __init__(self, n):
pass
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
pass
def forward(self, x):
pass
| 4
| 1
| 8
| 0
| 8
| 0
| 1
| 0.2
| 1
| 1
| 0
| 0
| 3
| 0
| 3
| 13
| 35
| 5
| 25
| 13
| 19
| 5
| 21
| 11
| 17
| 2
| 1
| 1
| 4
|
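The frozen batch norm above simply folds the fixed statistics into a per-channel scale and bias. A small self-contained check that this math matches `nn.BatchNorm2d` in eval mode:

```python
import torch
from torch import nn

torch.manual_seed(0)
bn = nn.BatchNorm2d(8).eval()
with torch.no_grad():
    bn.running_mean.uniform_(-1, 1)
    bn.running_var.uniform_(0.5, 2.0)

x = torch.randn(2, 8, 4, 4)

# Same math as the frozen module's forward: fold the stats into a scale and bias.
eps = 1e-5
scale = bn.weight.reshape(1, -1, 1, 1) * (bn.running_var.reshape(1, -1, 1, 1) + eps).rsqrt()
bias = bn.bias.reshape(1, -1, 1, 1) - bn.running_mean.reshape(1, -1, 1, 1) * scale
print(torch.allclose(x * scale + bias, bn(x), atol=1e-6))  # True
```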
4,282
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerPixelDecoderOutput
|
from typing import Optional, Union
from dataclasses import dataclass
import torch
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
@dataclass
@auto_docstring(custom_intro="\n OneFormer's pixel decoder module output, practically a Multi-Scale Deformable Attention based decoder. It returns\n the mask features and the multiscale features.\n ")
class OneFormerPixelDecoderOutput(ModelOutput):
"""
multi_scale_features (`tuple(torch.FloatTensor)`):
Tuple of multi-scale features of scales [1/8, 1/16, 1/32] and shape `(batch_size, num_channels, height,
width)` from the Multi-Scale Deformable Attention based Pixel Decoder.
mask_features (`torch.FloatTensor`):
Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel Decoder
Layer.
attentions (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights from pixel decoder. Returned when `output_attentions=True` is passed
or when `config.output_attentions=True`
"""
multi_scale_features: Optional[tuple[torch.FloatTensor]] = None
mask_features: Optional[torch.FloatTensor] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n OneFormer's pixel decoder module output, practically a Multi-Scale Deformable Attention based decoder. It returns\n the mask features and the multiscale features.\n ")
class OneFormerPixelDecoderOutput(ModelOutput):
'''
multi_scale_features (`tuple(torch.FloatTensor)`):
Tuple of multi-scale features of scales [1/8, 1/16, 1/32] and shape `(batch_size, num_channels, height,
width)` from the Multi-Scale Deformable Attention based Pixel Decoder.
mask_features (`torch.FloatTensor`):
Tensor of shape `(batch_size, num_channels, height, width)`, 1/4 scale features from the last Pixel Decoder
Layer.
attentions (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights from pixel decoder. Returned when `output_attentions=True` is passed
or when `config.output_attentions=True`
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 2
| 4
| 4
| 3
| 15
| 4
| 4
| 3
| 0
| 1
| 0
| 0
|
4,283
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerPixelLevelModule
|
from ...utils.backbone_utils import load_backbone
from .configuration_oneformer import OneFormerConfig
from torch import Tensor, nn
class OneFormerPixelLevelModule(nn.Module):
def __init__(self, config: OneFormerConfig):
"""
Pixel Level Module proposed in [Masked-attention Mask Transformer for Universal Image
Segmentation](https://huggingface.co/papers/2112.01527). It runs the input image through a backbone and a pixel
decoder, generating multi-scale feature maps and pixel embeddings.
Args:
config ([`OneFormerConfig`]):
The configuration used to instantiate this model.
"""
super().__init__()
self.encoder = load_backbone(config)
self.decoder = OneFormerPixelDecoder(config, feature_channels=self.encoder.channels)
def forward(self, pixel_values: Tensor, output_hidden_states: bool=False) -> OneFormerPixelLevelModuleOutput:
features: list[Tensor] = self.encoder(pixel_values).feature_maps
decoder_output: OneFormerPixelDecoderOutput = self.decoder(features, output_hidden_states=output_hidden_states)
return OneFormerPixelLevelModuleOutput(encoder_features=tuple(features), decoder_features=decoder_output.multi_scale_features, decoder_last_feature=decoder_output.mask_features)
|
class OneFormerPixelLevelModule(nn.Module):
def __init__(self, config: OneFormerConfig):
'''
Pixel Level Module proposed in [Masked-attention Mask Transformer for Universal Image
Segmentation](https://huggingface.co/papers/2112.01527). It runs the input image through a backbone and a pixel
decoder, generating multi-scale feature maps and pixel embeddings.
Args:
config ([`OneFormerConfig`]):
The configuration used to instantiate this model.
'''
pass
def forward(self, pixel_values: Tensor, output_hidden_states: bool=False) -> OneFormerPixelLevelModuleOutput:
pass
| 3
| 1
| 11
| 1
| 6
| 4
| 1
| 0.62
| 1
| 8
| 4
| 0
| 2
| 2
| 2
| 12
| 23
| 2
| 13
| 7
| 10
| 8
| 9
| 7
| 6
| 1
| 1
| 0
| 2
|
4,284
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerPixelLevelModuleOutput
|
import torch
from typing import Optional, Union
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
@dataclass
@auto_docstring(custom_intro="\n OneFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the\n `encoder` and `decoder`. By default, the `encoder` is a Swin/Dinat Backbone and the `decoder` is a Multi-Scale\n Deformable Attention based decoder.\n ")
class OneFormerPixelLevelModuleOutput(ModelOutput):
"""
encoder_features (List of `(torch.FloatTensor)`):
List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
called feature maps) of the model at the output of each stage.
decoder_features (List of `(torch.FloatTensor)`):
List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
called feature maps) of the model at the output of each stage.
decoder_last_feature (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
1/4 scale features from the last Pixel Decoder Layer.
"""
encoder_features: Optional[list[torch.FloatTensor]] = None
decoder_features: Optional[list[torch.FloatTensor]] = None
decoder_last_feature: Optional[torch.FloatTensor] = None
|
@dataclass
@auto_docstring(custom_intro="\n OneFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the\n `encoder` and `decoder`. By default, the `encoder` is a Swin/Dinat Backbone and the `decoder` is a Multi-Scale\n Deformable Attention based decoder.\n ")
class OneFormerPixelLevelModuleOutput(ModelOutput):
'''
encoder_features (List of `(torch.FloatTensor)`):
List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
called feature maps) of the model at the output of each stage.
decoder_features (List of `(torch.FloatTensor)`):
List of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
called feature maps) of the model at the output of each stage.
decoder_last_feature (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
1/4 scale features from the last Pixel Decoder Layer.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 2
| 4
| 4
| 3
| 14
| 4
| 4
| 3
| 0
| 1
| 0
| 0
|
4,285
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerPreTrainedModel
|
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, is_accelerate_available, is_scipy_available, logging, requires_backends
from .configuration_oneformer import OneFormerConfig
from ...modeling_utils import PreTrainedModel
import torch
import numpy as np
import math
@auto_docstring
class OneFormerPreTrainedModel(PreTrainedModel):
config: OneFormerConfig
base_model_prefix = 'model'
main_input_name = 'pixel_values'
def _init_weights(self, module: nn.Module):
xavier_std = self.config.init_xavier_std
std = self.config.init_std
if isinstance(module, OneFormerTransformerModule):
if module.input_projections is not None:
for input_projection in module.input_projections:
if not isinstance(input_projection, nn.Sequential):
nn.init.xavier_uniform_(input_projection.weight, gain=xavier_std)
nn.init.constant_(input_projection.bias, 0)
elif isinstance(module, OneFormerTransformerDecoder):
nn.init.xavier_uniform_(module.query_input_projection.weight, gain=xavier_std)
nn.init.constant_(module.query_input_projection.bias, 0)
elif isinstance(module, OneFormerPixelDecoderEncoderMultiscaleDeformableAttention):
nn.init.constant_(module.sampling_offsets.weight.data, 0.0)
thetas = torch.arange(module.n_heads, dtype=torch.int64).float() * (2.0 * math.pi / module.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(module.n_heads, 1, 1, 2).repeat(1, module.n_levels, module.n_points, 1)
for i in range(module.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
module.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
nn.init.constant_(module.attention_weights.weight.data, 0.0)
nn.init.constant_(module.attention_weights.bias.data, 0.0)
nn.init.xavier_uniform_(module.value_proj.weight.data)
nn.init.constant_(module.value_proj.bias.data, 0.0)
nn.init.xavier_uniform_(module.output_proj.weight.data)
nn.init.constant_(module.output_proj.bias.data, 0.0)
elif isinstance(module, OneFormerPixelDecoder):
nn.init.normal_(module.level_embed, std=0)
elif isinstance(module, (OneFormerTransformerDecoderLayer, OneFormerTransformerDecoderQueryTransformer)):
for p in module.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p, gain=xavier_std)
elif isinstance(module, OneFormerTextTransformer):
proj_std = module.width ** (-0.5) * (2 * module.num_layers) ** (-0.5)
attn_std = module.width ** (-0.5)
fc_std = (2 * module.width) ** (-0.5)
for layer in module.layers:
nn.init.normal_(layer.self_attn.in_proj_weight, std=attn_std)
nn.init.normal_(layer.self_attn.out_proj.weight, std=proj_std)
nn.init.normal_(layer.mlp.fc1.weight, std=fc_std)
nn.init.normal_(layer.mlp.fc2.weight, std=proj_std)
elif isinstance(module, OneFormerTextEncoder):
nn.init.normal_(module.token_embedding.weight, std=0.02)
nn.init.normal_(module.positional_embedding, std=0.01)
if hasattr(module, 'reference_points'):
nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0)
nn.init.constant_(module.reference_points.bias.data, 0.0)
elif isinstance(module, OneFormerMLPPredictionHead):
for submodule in module.modules():
if isinstance(submodule, nn.Linear):
nn.init.xavier_uniform_(submodule.weight, gain=xavier_std)
nn.init.constant_(submodule.bias, 0)
elif isinstance(module, nn.MultiheadAttention):
module.in_proj_weight.data.normal_(mean=0.0, std=std)
module.in_proj_bias.data.zero_()
elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, OneFormerLoss):
module.logit_scale.data.fill_(np.log(1 / self.config.contrastive_temperature))
|
@auto_docstring
class OneFormerPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
pass
| 3
| 0
| 108
| 0
| 108
| 0
| 50
| 0
| 1
| 16
| 15
| 2
| 1
| 0
| 1
| 1
| 113
| 1
| 112
| 17
| 110
| 0
| 90
| 17
| 88
| 50
| 1
| 5
| 50
|
4,286
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerSinePositionEmbedding
|
from ...pytorch_utils import compile_compatible_method_lru_cache
import torch
import math
from torch import Tensor, nn
from typing import Optional, Union
class OneFormerSinePositionEmbedding(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats: int=64, temperature: int=10000, normalize: bool=False, scale: Optional[float]=None):
super().__init__()
if scale is not None and normalize is False:
raise ValueError('normalize should be True if scale is passed')
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
self.scale = 2 * math.pi if scale is None else scale
@compile_compatible_method_lru_cache(maxsize=1)
def forward(self, shape: torch.Size, device: Union[torch.device, str], dtype: torch.dtype, mask: Optional[Tensor]=None) -> Tensor:
if mask is None:
mask = torch.zeros((shape[0], shape[2], shape[3]), device=device, dtype=torch.bool)
not_mask = (~mask).to(dtype)
y_embed = not_mask.cumsum(1)
x_embed = not_mask.cumsum(2)
if self.normalize:
eps = 1e-06
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=device).to(dtype)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
|
class OneFormerSinePositionEmbedding(nn.Module):
'''
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
'''
def __init__(self, num_pos_feats: int=64, temperature: int=10000, normalize: bool=False, scale: Optional[float]=None):
pass
@compile_compatible_method_lru_cache(maxsize=1)
def forward(self, shape: torch.Size, device: Union[torch.device, str], dtype: torch.dtype, mask: Optional[Tensor]=None) -> Tensor:
pass
| 4
| 1
| 15
| 1
| 14
| 0
| 3
| 0.14
| 1
| 6
| 0
| 0
| 2
| 4
| 2
| 12
| 37
| 4
| 29
| 17
| 24
| 4
| 27
| 15
| 24
| 3
| 1
| 1
| 6
|
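Illustrative use of the sine position embedding above (this assumes the class is importable from the modeling file): the output carries `2 * num_pos_feats` channels, which is why the pixel decoder constructs it with `num_pos_feats=config.conv_dim // 2`.

```python
import torch

# Hypothetical usage: position embeddings for a (2, 256, 32, 48) feature map.
pos_embed = OneFormerSinePositionEmbedding(num_pos_feats=128, normalize=True)
pos = pos_embed(torch.Size([2, 256, 32, 48]), device="cpu", dtype=torch.float32)
print(pos.shape)  # torch.Size([2, 256, 32, 48])
```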
4,287
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTaskModel
|
from .configuration_oneformer import OneFormerConfig
from torch import Tensor, nn
class OneFormerTaskModel(nn.Module):
def __init__(self, config: OneFormerConfig):
super().__init__()
self.task_mlp = OneFormerMLPPredictionHead(config.task_seq_len, config.hidden_dim, config.hidden_dim, 2)
def forward(self, inputs: Tensor) -> Tensor:
task_tokens = self.task_mlp(inputs)
return task_tokens
|
class OneFormerTaskModel(nn.Module):
def __init__(self, config: OneFormerConfig):
pass
def forward(self, inputs: Tensor) -> Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 4
| 2
| 0
| 2
| 1
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
4,288
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTextContextDecoder
|
from torch import Tensor, nn
class OneFormerTextContextDecoder(nn.Module):
def __init__(self, transformer_width=256, transformer_heads=4, transformer_layers=6, visual_dim=1024, dropout=0.1, layer_norm_eps=1e-05, **kwargs):
super().__init__()
self.memory_proj = nn.Sequential(nn.LayerNorm(visual_dim, eps=layer_norm_eps), nn.Linear(visual_dim, transformer_width), nn.LayerNorm(transformer_width, eps=layer_norm_eps))
self.text_proj = nn.Sequential(nn.LayerNorm(visual_dim, eps=layer_norm_eps), nn.Linear(visual_dim, transformer_width))
self.decoder = nn.ModuleList([OneFormerTextTransformerDecoderLayer(transformer_width, transformer_heads, dropout, layer_norm_eps) for _ in range(transformer_layers)])
self.out_proj = nn.Sequential(nn.LayerNorm(transformer_width, eps=layer_norm_eps), nn.Linear(transformer_width, visual_dim))
def forward(self, text, visual):
visual = self.memory_proj(visual)
hidden_state = self.text_proj(text)
for layer in self.decoder:
hidden_state = layer(hidden_state, visual)
return self.out_proj(hidden_state)
|
class OneFormerTextContextDecoder(nn.Module):
def __init__(self, transformer_width=256, transformer_heads=4, transformer_layers=6, visual_dim=1024, dropout=0.1, layer_norm_eps=1e-05, **kwargs):
pass
def forward(self, text, visual):
pass
| 3
| 0
| 21
| 3
| 18
| 0
| 2
| 0
| 1
| 3
| 1
| 0
| 2
| 4
| 2
| 12
| 43
| 7
| 36
| 19
| 24
| 0
| 13
| 9
| 10
| 2
| 1
| 1
| 3
|
4,289
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTextEncoder
|
import torch
from torch import Tensor, nn
class OneFormerTextEncoder(nn.Module):
def __init__(self, context_length: int, width: int, layers: int, vocab_size, use_checkpoint=False, layer_norm_eps=1e-05):
super().__init__()
heads = width // 64
self.context_length = context_length
self.width = width
self.transformer = OneFormerTextTransformer(width=width, layers=layers, heads=heads, attn_mask=self.build_attention_mask(), use_checkpoint=use_checkpoint, layer_norm_eps=layer_norm_eps)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width))
self.ln_final = nn.LayerNorm(width, eps=layer_norm_eps)
self.token_embedding = nn.Embedding(vocab_size, width)
def build_attention_mask(self):
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float('-inf'))
mask.triu_(1)
return mask
def forward(self, text):
hidden_state = self.token_embedding(text)
hidden_state = hidden_state + self.positional_embedding
hidden_state = hidden_state.permute(1, 0, 2)
hidden_state = self.transformer(hidden_state)
hidden_state = hidden_state.permute(1, 0, 2)
hidden_state = self.ln_final(hidden_state)
hidden_state = hidden_state[torch.arange(hidden_state.shape[0]), text.argmax(dim=-1)]
return hidden_state
|
class OneFormerTextEncoder(nn.Module):
def __init__(self, context_length: int, width: int, layers: int, vocab_size, use_checkpoint=False, layer_norm_eps=1e-05):
pass
def build_attention_mask(self):
pass
def forward(self, text):
pass
| 4
| 0
| 14
| 1
| 13
| 1
| 1
| 0.08
| 1
| 4
| 1
| 0
| 3
| 6
| 3
| 13
| 45
| 4
| 39
| 21
| 27
| 3
| 24
| 13
| 20
| 1
| 1
| 0
| 3
|
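A small sketch of the causal attention mask built by OneFormerTextEncoder.build_attention_mask above, with an assumed context_length of 4: entries strictly above the diagonal are -inf, so every token can attend only to itself and to earlier tokens.
import torch
context_length = 4
mask = torch.empty(context_length, context_length)
mask.fill_(float('-inf'))
mask.triu_(1)                        # keep -inf strictly above the diagonal, zero on and below it
print(mask)
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])
The forward pass above then pools the final hidden state at each sequence's argmax token id, the same end-of-text pooling used by CLIP-style text encoders.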
4,290
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTextMLP
|
from torch import Tensor, nn
import torch
from typing import Optional, Union
from ...activations import ACT2FN
class OneFormerTextMLP(nn.Module):
def __init__(self, hidden_size: Optional[int]=None, intermediate_size: Optional[int]=None, output_size: Optional[int]=None):
super().__init__()
self.activation_fn = ACT2FN['quick_gelu']
hidden_size = hidden_size
intermediate_size = intermediate_size
output_size = output_size
self.fc1 = nn.Linear(hidden_size, intermediate_size)
self.fc2 = nn.Linear(intermediate_size, output_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
class OneFormerTextMLP(nn.Module):
def __init__(self, hidden_size: Optional[int]=None, intermediate_size: Optional[int]=None, output_size: Optional[int]=None):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 9
| 0
| 9
| 0
| 1
| 0
| 1
| 3
| 0
| 0
| 2
| 3
| 2
| 12
| 20
| 1
| 19
| 11
| 11
| 0
| 14
| 6
| 11
| 1
| 1
| 0
| 2
|
4,291
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTextMapper
|
import torch
from torch import Tensor, nn
from .configuration_oneformer import OneFormerConfig
class OneFormerTextMapper(nn.Module):
def __init__(self, config: OneFormerConfig):
super().__init__()
self.text_encoder = OneFormerTextEncoder(context_length=config.text_encoder_context_length, width=config.text_encoder_width, layers=config.text_encoder_num_layers, vocab_size=config.text_encoder_vocab_size, layer_norm_eps=config.layer_norm_eps)
self.text_projector = OneFormerMLPPredictionHead(config.text_encoder_width, config.hidden_dim, config.hidden_dim, config.text_encoder_proj_layers)
if config.text_encoder_n_ctx > 0:
self.prompt_ctx = nn.Embedding(config.text_encoder_n_ctx, config.text_encoder_width)
else:
self.prompt_ctx = None
def forward(self, inputs: Tensor) -> Tensor:
text_queries = self.encode_text(inputs)
return text_queries
def encode_text(self, text):
if text.ndim is None:
raise ValueError('text must not be NoneType')
if text.ndim not in [2, 3]:
raise ValueError('Number of dimensions in text must be 2 or 3')
squeeze_dim = False
num_text = 1
if text.ndim == 3:
num_text = text.shape[1]
batch_size, num_text, hidden_dim = text.shape
text = text.reshape(batch_size * num_text, hidden_dim)
squeeze_dim = True
encoded_text = self.text_encoder(text)
text_queries = self.text_projector(encoded_text)
if squeeze_dim:
_, hidden_dim = text_queries.shape
text_queries = text_queries.reshape(batch_size, num_text, hidden_dim)
if self.prompt_ctx is not None:
text_queries_ctx = self.prompt_ctx.weight.unsqueeze(0).repeat(text_queries.shape[0], 1, 1)
text_queries = torch.cat([text_queries, text_queries_ctx], dim=1)
return text_queries
|
class OneFormerTextMapper(nn.Module):
def __init__(self, config: OneFormerConfig):
pass
def forward(self, inputs: Tensor) -> Tensor:
pass
def encode_text(self, text):
pass
| 4
| 0
| 19
| 2
| 16
| 0
| 3
| 0.02
| 1
| 6
| 3
| 0
| 3
| 3
| 3
| 13
| 59
| 8
| 50
| 18
| 43
| 1
| 32
| 15
| 28
| 6
| 1
| 2
| 9
|
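A sketch of the flatten-encode-unflatten bookkeeping in OneFormerTextMapper.encode_text above, with assumed toy sizes and a random placeholder standing in for the text encoder and projector; note that the record names the last input dimension hidden_dim even though it holds token ids.
import torch
batch_size, num_text, seq_len, hidden_dim = 2, 3, 77, 256
text = torch.randint(0, 100, (batch_size, num_text, seq_len))        # 3-D input: several prompts per image
encode_and_project = lambda t: torch.randn(t.shape[0], hidden_dim)   # placeholder, not the real encoder
flat = text.reshape(batch_size * num_text, seq_len)                  # flatten prompts into the batch
text_queries = encode_and_project(flat).reshape(batch_size, num_text, hidden_dim)
prompt_ctx = torch.randn(16, hidden_dim)                              # learned prompt context, n_ctx=16 assumed
text_queries = torch.cat([text_queries, prompt_ctx.unsqueeze(0).repeat(batch_size, 1, 1)], dim=1)
print(text_queries.shape)                                             # torch.Size([2, 19, 256])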
4,292
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTextMapperAttention
|
import torch
from torch import Tensor, nn
class OneFormerTextMapperAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** (-0.5)
self.q_proj = nn.Linear(dim, dim, bias=qkv_bias)
self.k_proj = nn.Linear(dim, dim, bias=qkv_bias)
self.v_proj = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, q, k, v):
batch_size, q_sequence_length, num_channels = q.shape
if not k.shape == v.shape:
raise ValueError(f'keys ({list(k.shape)}) and values ({list(v.shape)}) have different shapes!')
batch_size, k_sequence_length, num_channels = k.shape
q = self.q_proj(q).reshape(batch_size, q_sequence_length, self.num_heads, num_channels // self.num_heads)
k = self.k_proj(k).reshape(batch_size, k_sequence_length, self.num_heads, num_channels // self.num_heads)
v = self.v_proj(v).reshape(batch_size, k_sequence_length, self.num_heads, num_channels // self.num_heads)
attn = torch.einsum('bnkc,bmkc->bknm', q, k) * self.scale
attn = attn.softmax(dim=-1)
output = torch.einsum('bknm,bmkc->bnkc', attn, v).reshape(batch_size, q_sequence_length, num_channels)
output = self.proj(output)
output = self.proj_drop(output)
return output
|
class OneFormerTextMapperAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
pass
def forward(self, q, k, v):
pass
| 3
| 0
| 16
| 3
| 13
| 1
| 2
| 0.04
| 1
| 3
| 0
| 0
| 2
| 8
| 2
| 12
| 34
| 7
| 26
| 16
| 23
| 1
| 26
| 16
| 23
| 2
| 1
| 1
| 3
|
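A shape-level sketch, with random tensors and assumed sizes, of the einsum attention in OneFormerTextMapperAttention.forward above: queries, keys and values are reshaped into heads, 'bnkc,bmkc->bknm' produces one attention map per head, and 'bknm,bmkc->bnkc' applies it to the values.
import torch
batch, q_len, k_len, num_heads, channels = 2, 5, 7, 4, 32
head_dim = channels // num_heads
q = torch.randn(batch, q_len, num_heads, head_dim)
k = torch.randn(batch, k_len, num_heads, head_dim)
v = torch.randn(batch, k_len, num_heads, head_dim)
attn = torch.einsum('bnkc,bmkc->bknm', q, k) * head_dim ** (-0.5)   # (batch, heads, q_len, k_len)
attn = attn.softmax(dim=-1)
output = torch.einsum('bknm,bmkc->bnkc', attn, v).reshape(batch, q_len, channels)
print(attn.shape, output.shape)                                      # [2, 4, 5, 7] and [2, 5, 32]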
4,293
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTextTransformer
|
import torch
from torch import Tensor, nn
from typing import Optional, Union
class OneFormerTextTransformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: Optional[torch.Tensor]=None, use_checkpoint=False, layer_norm_eps=1e-05):
super().__init__()
self.width = width
self.num_layers = layers
self.layers = nn.Sequential(*[OneFormerTextTransformerLayer(width, heads, attn_mask, layer_norm_eps) for _ in range(layers)])
self.use_checkpoint = use_checkpoint
def forward(self, hidden_states: torch.Tensor):
for layer in self.layers:
hidden_states = layer(hidden_states)
return hidden_states
|
class OneFormerTextTransformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: Optional[torch.Tensor]=None, use_checkpoint=False, layer_norm_eps=1e-05):
pass
def forward(self, hidden_states: torch.Tensor):
pass
| 3
| 0
| 12
| 0
| 12
| 0
| 2
| 0
| 1
| 5
| 1
| 0
| 2
| 4
| 2
| 12
| 25
| 1
| 24
| 17
| 13
| 0
| 13
| 8
| 10
| 3
| 1
| 2
| 4
|
4,294
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTextTransformerDecoderLayer
|
from torch import Tensor, nn
class OneFormerTextTransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dropout=0.1, layer_norm_eps=1e-05):
super().__init__()
self.self_attn = OneFormerTextMapperAttention(d_model, nhead, proj_drop=dropout)
self.cross_attn = OneFormerTextMapperAttention(d_model, nhead, proj_drop=dropout)
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.dropout = nn.Dropout(dropout)
self.mlp = nn.Sequential(nn.Linear(d_model, d_model * 4), nn.GELU(), nn.Dropout(dropout), nn.Linear(d_model * 4, d_model))
def forward(self, hidden_state, mem):
q = k = v = self.norm1(hidden_state)
hidden_state = hidden_state + self.self_attn(q, k, v)
q = self.norm2(hidden_state)
hidden_state = hidden_state + self.cross_attn(q, mem, mem)
hidden_state = hidden_state + self.dropout(self.mlp(self.norm3(hidden_state)))
return hidden_state
|
class OneFormerTextTransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dropout=0.1, layer_norm_eps=1e-05):
pass
def forward(self, hidden_state, mem):
pass
| 3
| 0
| 13
| 1
| 12
| 0
| 1
| 0
| 1
| 2
| 1
| 0
| 2
| 7
| 2
| 12
| 28
| 3
| 25
| 17
| 16
| 0
| 17
| 11
| 14
| 1
| 1
| 0
| 2
|
4,295
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTextTransformerLayer
|
from torch import Tensor, nn
from typing import Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
import torch
class OneFormerTextTransformerLayer(GradientCheckpointingLayer):
def __init__(self, width: int, heads: int, attn_mask: torch.Tensor, layer_norm_eps=1e-05):
super().__init__()
self.self_attn = nn.MultiheadAttention(width, heads)
self.layer_norm1 = nn.LayerNorm(width, eps=layer_norm_eps)
self.mlp = OneFormerTextMLP(width, width * 4, width)
self.layer_norm2 = nn.LayerNorm(width, eps=layer_norm_eps)
self.attn_mask = attn_mask
def forward(self, hidden_states: torch.Tensor, key_padding_mask: Optional[torch.Tensor]=None) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states = self.self_attn(hidden_states, hidden_states, hidden_states, need_weights=False, key_padding_mask=key_padding_mask)[0]
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
|
class OneFormerTextTransformerLayer(GradientCheckpointingLayer):
def __init__(self, width: int, heads: int, attn_mask: torch.Tensor, layer_norm_eps=1e-05):
pass
def forward(self, hidden_states: torch.Tensor, key_padding_mask: Optional[torch.Tensor]=None) -> torch.FloatTensor:
pass
| 3
| 0
| 15
| 2
| 14
| 0
| 1
| 0
| 1
| 4
| 1
| 0
| 2
| 5
| 2
| 12
| 32
| 4
| 28
| 13
| 21
| 0
| 18
| 9
| 15
| 1
| 1
| 0
| 2
|
4,296
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTransformerDecoder
|
import torch
from torch import Tensor, nn
from .configuration_oneformer import OneFormerConfig
class OneFormerTransformerDecoder(nn.Module):
"""
Transformer decoder
"""
def __init__(self, in_channels: int, config: OneFormerConfig):
super().__init__()
self.config = config
self.dropout = config.dropout
self.num_heads = config.num_attention_heads
self.is_training = config.is_training
self.use_task_norm = config.use_task_norm
self.use_auxiliary_loss = config.use_auxiliary_loss
self.query_transformer = OneFormerTransformerDecoderQueryTransformer(d_model=config.hidden_dim, dropout=config.dropout, nhead=config.num_attention_heads, dim_feedforward=config.dim_feedforward, num_decoder_layers=config.query_dec_layers, normalize_before=config.pre_norm, return_intermediate_dec=False, layer_norm_eps=config.layer_norm_eps)
self.decoder_norm = nn.LayerNorm(config.hidden_dim, eps=config.layer_norm_eps)
self.num_feature_levels = 3
self.layers = nn.ModuleList([OneFormerTransformerDecoderLayer(config) for _ in range(config.decoder_layers - 1)])
self.query_input_projection = nn.Conv2d(in_channels, config.hidden_dim, kernel_size=1)
self.class_embed = nn.Linear(config.hidden_dim, config.num_labels + 1)
self.mask_embed = OneFormerMLPPredictionHead(config.hidden_dim, config.hidden_dim, config.mask_dim, 3)
def forward(self, task_token=None, multi_stage_features=None, multi_stage_positional_embeddings=None, mask_features=None, query_features=None, query_embeddings=None, query_embedder=None, size_list=None, output_attentions=None):
if self.use_task_norm:
task_token = self.decoder_norm(task_token)
object_queries = self.query_transformer(query_features, None, query_embedder.weight[:-1], self.query_input_projection(mask_features), task_token if self.use_task_norm else None)
object_queries = object_queries[0].permute(1, 0, 2)
queries = torch.cat([object_queries, task_token], dim=0)
output = queries.clone()
intermediate_class_predictions = []
intermediate_mask_predictions = []
outputs_class, outputs_mask, attention_mask = self.forward_prediction_heads(output, mask_features, attention_mask_target_size=size_list[0])
intermediate_class_predictions.append(outputs_class)
intermediate_mask_predictions.append(outputs_mask)
attentions = ()
for index, layer in enumerate(self.layers):
layer_outputs = layer(index=index, output=output, multi_stage_features=multi_stage_features, multi_stage_positional_embeddings=multi_stage_positional_embeddings, attention_mask=attention_mask, query_embeddings=query_embeddings, output_attentions=output_attentions)
output = layer_outputs[0]
attentions += (layer_outputs[1:],)
outputs_class, outputs_mask, attention_mask = self.forward_prediction_heads(output, mask_features, attention_mask_target_size=size_list[(index + 1) % self.num_feature_levels])
intermediate_class_predictions.append(outputs_class)
intermediate_mask_predictions.append(outputs_mask)
if not len(intermediate_mask_predictions) == len(self.layers) + 1:
raise ValueError('Intermediate predictions in the transformer decoder must have the same number of elements as number of layers')
object_queries = layer_outputs[0].permute(1, 0, 2)
contrastive_logits = queries.permute(1, 0, 2)
return OneFormerTransformerDecoderOutput(object_queries=object_queries, contrastive_logits=contrastive_logits, prediction_masks=intermediate_mask_predictions[-1], prediction_class=intermediate_class_predictions[-1], auxiliary_predictions=self._get_aux_predictions(intermediate_class_predictions, intermediate_mask_predictions) if self.use_auxiliary_loss else None, attentions=attentions)
def forward_prediction_heads(self, output, mask_features, attention_mask_target_size):
decoder_output = self.decoder_norm(output)
decoder_output = decoder_output.transpose(0, 1)
outputs_class = self.class_embed(decoder_output)
mask_embed = self.mask_embed(decoder_output)
outputs_mask = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_features)
attention_mask = nn.functional.interpolate(outputs_mask, size=attention_mask_target_size, mode='bilinear', align_corners=False)
attention_mask = (attention_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5).bool()
attention_mask = attention_mask.detach()
return (outputs_class, outputs_mask, attention_mask)
@torch.jit.unused
def _get_aux_predictions(self, outputs_class, outputs_seg_masks):
aux_list = [{'class_queries_logits': a, 'masks_queries_logits': b} for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])]
return tuple(aux_list)
|
class OneFormerTransformerDecoder(nn.Module):
'''
Transformer decoder
'''
def __init__(self, in_channels: int, config: OneFormerConfig):
pass
def forward(self, task_token=None, multi_stage_features=None, multi_stage_positional_embeddings=None, mask_features=None, query_features=None, query_embeddings=None, query_embedder=None, size_list=None, output_attentions=None):
pass
def forward_prediction_heads(self, output, mask_features, attention_mask_target_size):
pass
@torch.jit.unused
def _get_aux_predictions(self, outputs_class, outputs_seg_masks):
pass
| 6
| 1
| 37
| 6
| 30
| 2
| 2
| 0.07
| 1
| 12
| 5
| 0
| 4
| 13
| 4
| 14
| 158
| 28
| 121
| 47
| 104
| 9
| 54
| 34
| 49
| 6
| 1
| 1
| 9
|
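A shape-level sketch, with random tensors and assumed sizes, of the per-query mask prediction in OneFormerTransformerDecoder.forward_prediction_heads above: each query's mask embedding is combined with the per-pixel features through 'bqc,bchw->bqhw', and the resulting logits are resized and thresholded into the attention mask fed to the next decoder layer.
import torch
from torch import nn
batch, num_queries, hidden_dim, height, width, num_heads = 2, 150, 256, 32, 32, 8
mask_embed = torch.randn(batch, num_queries, hidden_dim)             # output of the mask MLP head
mask_features = torch.randn(batch, hidden_dim, height, width)        # output of the pixel decoder
outputs_mask = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_features)
print(outputs_mask.shape)                                             # torch.Size([2, 150, 32, 32])
attention_mask = nn.functional.interpolate(outputs_mask, size=(16, 16), mode='bilinear', align_corners=False)
attention_mask = (attention_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, num_heads, 1, 1).flatten(0, 1) < 0.5).bool()
print(attention_mask.shape)                                           # torch.Size([16, 150, 256]): (batch * heads, queries, pixels)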
4,297
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTransformerDecoderCrossAttentionLayer
|
from ...activations import ACT2FN
from torch import Tensor, nn
from typing import Optional, Union
class OneFormerTransformerDecoderCrossAttentionLayer(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.0, activation='relu', normalize_before=False, layer_norm_eps=1e-05):
super().__init__()
self.multihead_attn = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout)
self.norm = nn.LayerNorm(embed_dim, eps=layer_norm_eps)
self.dropout = nn.Dropout(dropout)
self.activation = ACT2FN[activation]
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, output, memory, memory_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
output2, attention_weights = self.multihead_attn(query=self.with_pos_embed(output, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)
output = output + self.dropout(output2)
output = self.norm(output)
return (output, attention_weights)
def forward_pre(self, output, memory, memory_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
output2 = self.norm(output)
output2, attention_weights = self.multihead_attn(query=self.with_pos_embed(output2, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)
output = output + self.dropout(output2)
return (output, attention_weights)
def forward(self, output, memory, memory_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
if self.normalize_before:
return self.forward_pre(output, memory, memory_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(output, memory, memory_mask, memory_key_padding_mask, pos, query_pos)
|
class OneFormerTransformerDecoderCrossAttentionLayer(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.0, activation='relu', normalize_before=False, layer_norm_eps=1e-05):
pass
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
pass
def forward_post(self, output, memory, memory_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
pass
def forward_pre(self, output, memory, memory_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
pass
def forward(self, output, memory, memory_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
pass
| 6
| 0
| 13
| 1
| 12
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 5
| 5
| 5
| 15
| 70
| 8
| 62
| 40
| 30
| 0
| 24
| 14
| 18
| 2
| 1
| 1
| 7
|
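A compact sketch, with assumed toy dimensions and with dropout and positional embeddings omitted, of the normalize_before switch in OneFormerTransformerDecoderCrossAttentionLayer above: forward_post applies LayerNorm after the residual addition, while forward_pre normalizes the queries before attending.
import torch
from torch import nn
embed_dim, num_heads, query_len, memory_len, batch = 16, 4, 6, 10, 2
attn = nn.MultiheadAttention(embed_dim, num_heads)
norm = nn.LayerNorm(embed_dim)
output = torch.randn(query_len, batch, embed_dim)    # object queries, sequence-first as above
memory = torch.randn(memory_len, batch, embed_dim)   # pixel-decoder features
post = norm(output + attn(output, memory, memory)[0])     # post-norm path (normalize_before=False)
pre = output + attn(norm(output), memory, memory)[0]      # pre-norm path (normalize_before=True)
print(post.shape, pre.shape)                               # both torch.Size([6, 2, 16])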
4,298
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTransformerDecoderFFNLayer
|
from torch import Tensor, nn
from typing import Optional, Union
from ...activations import ACT2FN
class OneFormerTransformerDecoderFFNLayer(nn.Module):
def __init__(self, d_model, dim_feedforward=2048, dropout=0.0, activation='relu', normalize_before=False, layer_norm_eps=1e-05):
super().__init__()
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.activation = ACT2FN[activation]
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, output):
output2 = self.linear2(self.dropout(self.activation(self.linear1(output))))
output = output + self.dropout(output2)
output = self.norm(output)
return output
def forward_pre(self, output):
output2 = self.norm(output)
output2 = self.linear2(self.dropout(self.activation(self.linear1(output2))))
output = output + self.dropout(output2)
return output
def forward(self, output):
if self.normalize_before:
return self.forward_pre(output)
return self.forward_post(output)
|
class OneFormerTransformerDecoderFFNLayer(nn.Module):
def __init__(self, d_model, dim_feedforward=2048, dropout=0.0, activation='relu', normalize_before=False, layer_norm_eps=1e-05):
pass
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
pass
def forward_post(self, output):
pass
def forward_pre(self, output):
pass
def forward(self, output):
pass
| 6
| 0
| 7
| 0
| 6
| 0
| 1
| 0.03
| 1
| 2
| 0
| 0
| 5
| 6
| 5
| 15
| 40
| 6
| 33
| 22
| 19
| 1
| 25
| 14
| 19
| 2
| 1
| 1
| 7
|
4,299
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
|
transformers.models.oneformer.modeling_oneformer.OneFormerTransformerDecoderLayer
|
from typing import Optional, Union
import torch
from .configuration_oneformer import OneFormerConfig
from torch import Tensor, nn
class OneFormerTransformerDecoderLayer(nn.Module):
def __init__(self, config: OneFormerConfig):
super().__init__()
self.embed_dim = config.hidden_dim
self.num_feature_levels = 3
self.cross_attn = OneFormerTransformerDecoderCrossAttentionLayer(embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=0.0, normalize_before=config.pre_norm, layer_norm_eps=config.layer_norm_eps)
self.self_attn = OneFormerTransformerDecoderSelfAttentionLayer(embed_dim=self.embed_dim, num_heads=config.num_attention_heads, dropout=0.0, normalize_before=config.pre_norm, layer_norm_eps=config.layer_norm_eps)
self.ffn = OneFormerTransformerDecoderFFNLayer(d_model=self.embed_dim, dim_feedforward=config.dim_feedforward, dropout=0.0, normalize_before=config.pre_norm, layer_norm_eps=config.layer_norm_eps)
def forward(self, index: int, output: torch.Tensor, multi_stage_features: list[torch.Tensor], multi_stage_positional_embeddings: list[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, query_embeddings: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
"""
Args:
index (`int`): index of the layer in the Transformer decoder.
output (`torch.FloatTensor`): the object queries of shape `(N, batch, hidden_dim)`
multi_stage_features (`list[torch.Tensor]`): the multi-scale features from the pixel decoder.
multi_stage_positional_embeddings (`list[torch.Tensor]`):
positional embeddings for the multi_stage_features
attention_mask (`torch.FloatTensor`): attention mask for the masked cross attention layer
query_embeddings (`torch.FloatTensor`, *optional*):
position embeddings that are added to the queries and keys in the self-attention layer.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
level_index = index % self.num_feature_levels
attention_mask[torch.where(attention_mask.sum(-1) == attention_mask.shape[-1])] = False
output, cross_attn_weights = self.cross_attn(output, multi_stage_features[level_index], memory_mask=attention_mask, memory_key_padding_mask=None, pos=multi_stage_positional_embeddings[level_index], query_pos=query_embeddings)
output, self_attn_weights = self.self_attn(output, output_mask=None, output_key_padding_mask=None, query_pos=query_embeddings)
output = self.ffn(output)
outputs = (output,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
|
class OneFormerTransformerDecoderLayer(nn.Module):
def __init__(self, config: OneFormerConfig):
pass
def forward(self, index: int, output: torch.Tensor, multi_stage_features: list[torch.Tensor], multi_stage_positional_embeddings: list[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, query_embeddings: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):
'''
Args:
index (`int`): index of the layer in the Transformer decoder.
output (`torch.FloatTensor`): the object queries of shape `(N, batch, hidden_dim)`
multi_stage_features (`list[torch.Tensor]`): the multi-scale features from the pixel decoder.
multi_stage_positional_embeddings (`list[torch.Tensor]`):
positional embeddings for the multi_stage_features
attention_mask (`torch.FloatTensor`): attention mask for the masked cross attention layer
query_embeddings (`torch.FloatTensor`, *optional*):
position embeddings that are added to the queries and keys in the self-attention layer.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 42
| 5
| 28
| 9
| 2
| 0.32
| 1
| 8
| 4
| 0
| 2
| 5
| 2
| 12
| 85
| 11
| 57
| 21
| 45
| 18
| 18
| 12
| 15
| 2
| 1
| 1
| 3
|
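A small sketch, with an assumed toy boolean mask, of the safeguard at the start of OneFormerTransformerDecoderLayer.forward above: a query whose predicted mask would block every key is reset to attend everywhere, which keeps the masked cross-attention softmax well defined.
import torch
attention_mask = torch.tensor([
    [False, True, False, True],
    [True, True, True, True],      # this query would be blocked from every key
    [False, False, True, False],
])
attention_mask[torch.where(attention_mask.sum(-1) == attention_mask.shape[-1])] = False
print(attention_mask)
# tensor([[False,  True, False,  True],
#         [False, False, False, False],
#         [False, False,  True, False]])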