Dataset schema (35 columns; for string columns, Min/Max are character lengths; for numeric columns, value ranges):

| Column | Dtype | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string | 7 | 58 |
| file_path | string | 9 | 302 |
| class_name | string | 5 | 256 |
| human_written_code | string | 16 | 2.16M |
| class_skeleton | string | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
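The rows below are sample records from the table. A minimal loading sketch, hedged: this dump does not name the dataset, so the Hub ID below is a placeholder, not the real one:

```python
# Minimal sketch, assuming the table is hosted on the Hugging Face Hub.
# "org/code-metrics-dataset" is a placeholder ID -- the dump above does not
# name the actual dataset, so substitute the real identifier.
from datasets import load_dataset

ds = load_dataset("org/code-metrics-dataset", split="train")
row = ds[0]
print(row["repository_name"], row["class_name"])
print(row["human_written_code"][:200])  # first 200 characters of the class source
print(row["CountLineCode"], row["SumCyclomatic"])
```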
---

**Row 1,300**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere/modeling_cohere.py`
- class_name: `transformers.models.cohere.modeling_cohere.CoherePreTrainedModel`

human_written_code:

```python
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_cohere import CohereConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple


@auto_docstring
class CoherePreTrainedModel(PreTrainedModel):
    config: CohereConfig
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['CohereDecoderLayer']
    _skip_keys_device_placement = ['past_key_values']
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    _can_record_outputs = {'hidden_states': CohereDecoderLayer, 'attentions': CohereAttention}
```

class_skeleton:

```python
@auto_docstring
class CoherePreTrainedModel(PreTrainedModel):
    pass
```

Metrics: total_program_units=2, total_doc_str=0, AvgCountLine=10, AvgCountLineBlank=0, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=5, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=3, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=24, CountLineBlank=1, CountLineCode=23, CountLineCodeDecl=15, CountLineCodeExe=21, CountLineComment=0, CountStmt=22, CountStmtDecl=15, CountStmtExe=20, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=5
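The metric columns appear to follow the naming scheme of SciTools Understand-style static analysis (CountLineCode, MaxCyclomatic, CountClassCoupled, and so on), and in every complete row shown here CommentToCodeRatio equals CountLineComment / CountLineCode (for example, 15 / 40 ≈ 0.38 in row 1,301). A rough sanity-check counter, as a sketch only; a real analyzer treats docstrings, inline comments, and continuation lines differently, so expect discrepancies:

```python
# Rough sketch: approximate a record's line-count metrics.
# A production analyzer counts docstrings, inline comments, and
# continuation lines differently, so the values are approximate.
def line_counts(source: str) -> dict:
    lines = source.splitlines()
    blank = sum(1 for line in lines if not line.strip())
    comment = sum(1 for line in lines if line.strip().startswith("#"))
    code = len(lines) - blank - comment
    return {
        "CountLine": len(lines),
        "CountLineBlank": blank,
        "CountLineComment": comment,
        "CountLineCode": code,
        "CommentToCodeRatio": comment / code if code else 0.0,
    }

sample = "import torch\n\n# scale the input\ny = 2 * x\n"
print(line_counts(sample))
# {'CountLine': 4, 'CountLineBlank': 1, 'CountLineComment': 1,
#  'CountLineCode': 2, 'CommentToCodeRatio': 0.5}
```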
---

**Row 1,301**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere/modeling_cohere.py`
- class_name: `transformers.models.cohere.modeling_cohere.CohereRotaryEmbedding`

human_written_code:

```python
import torch
from .configuration_cohere import CohereConfig
from torch import nn
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update


class CohereRotaryEmbedding(nn.Module):
    inv_freq: torch.Tensor

    def __init__(self, config: CohereConfig, device=None):
        super().__init__()
        if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
        else:
            self.rope_type = 'default'
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings
        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer('inv_freq', inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.repeat_interleave(freqs, 2, dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling
        return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
```

class_skeleton:

```python
class CohereRotaryEmbedding(nn.Module):

    def __init__(self, config: CohereConfig, device=None):
        pass

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        pass
```

Metrics: total_program_units=5, total_doc_str=0, AvgCountLine=18, AvgCountLineBlank=2, AvgCountLineCode=13, AvgCountLineComment=5, AvgCyclomatic=3, CommentToCodeRatio=0.38, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=1, CountClassDerived=1, CountDeclInstanceMethod=3, CountDeclInstanceVariable=7, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=59, CountLineBlank=8, CountLineCode=40, CountLineCodeDecl=21, CountLineCodeExe=35, CountLineComment=15, CountStmt=38, CountStmtDecl=20, CountStmtExe=34, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=8
---

**Row 1,302**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere/modular_cohere.py`
- class_name: `transformers.models.cohere.modular_cohere.CohereAttention`

human_written_code:

```python
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from .configuration_cohere import CohereConfig
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache
from ..llama.modeling_llama import LlamaAttention, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaRotaryEmbedding, eager_attention_forward
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...modeling_flash_attention_utils import FlashAttentionKwargs


class CohereAttention(LlamaAttention):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: CohereConfig, layer_idx: Optional[int]=None):
        super().__init__(config, layer_idx)
        self.use_qk_norm = config.use_qk_norm
        if self.use_qk_norm:
            self.q_norm = CohereLayerNorm(hidden_size=(config.num_attention_heads, self.head_dim), eps=config.layer_norm_eps)
            self.k_norm = CohereLayerNorm(hidden_size=(config.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        query_states = self.q_proj(hidden_states).view(hidden_shape)
        key_states = self.k_proj(hidden_states).view(hidden_shape)
        value_states = self.v_proj(hidden_states).view(hidden_shape)
        if self.use_qk_norm:
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)
        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        if past_key_values is not None:
            cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != 'eager':
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
        attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return (attn_output, attn_weights)
```

class_skeleton:

```python
class CohereAttention(LlamaAttention):
    '''Multi-headed attention from 'Attention Is All You Need' paper'''

    def __init__(self, config: CohereConfig, layer_idx: Optional[int]=None):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        pass
```

Metrics: total_program_units=4, total_doc_str=1, AvgCountLine=34, AvgCountLineBlank=4, AvgCountLineCode=29, AvgCountLineComment=2, AvgCyclomatic=4, CommentToCodeRatio=0.07, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=5, CountDeclMethod=2, CountDeclMethodAll=14, CountLine=71, CountLineBlank=10, CountLineCode=58, CountLineCodeDecl=25, CountLineCodeExe=47, CountLineComment=4, CountStmt=33, CountStmtDecl=15, CountStmtExe=30, MaxCyclomatic=6, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=8
---

**Row 1,303**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere/modular_cohere.py`
- class_name: `transformers.models.cohere.modular_cohere.CohereDecoderLayer`

human_written_code:

```python
import torch
from .configuration_cohere import CohereConfig
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
from ...cache_utils import Cache
from ...modeling_layers import GradientCheckpointingLayer
from ...utils.deprecation import deprecate_kwarg
from ...modeling_flash_attention_utils import FlashAttentionKwargs


class CohereDecoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: CohereConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = CohereAttention(config=config, layer_idx=layer_idx)
        self.mlp = CohereMLP(config)
        self.input_layernorm = CohereLayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_values (`Cache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states_attention, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
        hidden_states_mlp = self.mlp(hidden_states)
        hidden_states = residual + hidden_states_attention + hidden_states_mlp
        return hidden_states
```

class_skeleton:

```python
class CohereDecoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: CohereConfig, layer_idx: int):
        pass

    @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        '''
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_values (`Cache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        '''
        pass
```

Metrics: total_program_units=4, total_doc_str=1, AvgCountLine=33, AvgCountLineBlank=3, AvgCountLineCode=19, AvgCountLineComment=12, AvgCyclomatic=2, CommentToCodeRatio=0.61, CountClassBase=1, CountClassCoupled=10, CountClassCoupledModified=6, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=67, CountLineBlank=7, CountLineCode=38, CountLineCodeDecl=22, CountLineCodeExe=24, CountLineComment=23, CountStmt=17, CountStmtDecl=11, CountStmtExe=14, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
---

**Row 1,304**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere/modular_cohere.py`
- class_name: `transformers.models.cohere.modular_cohere.CohereForCausalLM`

human_written_code:

````python
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...utils import TransformersKwargs, logging
from ...cache_utils import Cache
import torch
from ..llama.modeling_llama import LlamaAttention, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaRotaryEmbedding, eager_attention_forward


class CohereForCausalLM(LlamaForCausalLM):

    def __init__(self, config):
        super().__init__(config)
        self.model = CohereModel(config)
        self.logit_scale = config.logit_scale
        self.tie_word_embeddings = config.tie_word_embeddings

    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
        """
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >> from transformers import AutoTokenizer, CohereForCausalLM

        >> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

        >> prompt = "Hey, are you conscious? Can you talk to me?"
        >> inputs = tokenizer(prompt, return_tensors="pt")

        >> # Generate
        >> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs)
        hidden_states = outputs.last_hidden_state
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits * self.logit_scale
        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
        return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
````

class_skeleton:

````python
class CohereForCausalLM(LlamaForCausalLM):

    def __init__(self, config):
        pass

    def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
        '''
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >> from transformers import AutoTokenizer, CohereForCausalLM

        >> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

        >> prompt = "Hey, are you conscious? Can you talk to me?"
        >> inputs = tokenizer(prompt, return_tensors="pt")

        >> # Generate
        >> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```'''
        pass
````

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=48, AvgCountLineBlank=6, AvgCountLineCode=28, AvgCountLineComment=14, AvgCyclomatic=5, CommentToCodeRatio=0.49, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=11, CountLine=97, CountLineBlank=13, CountLineCode=57, CountLineCodeDecl=27, CountLineCodeExe=39, CountLineComment=28, CountStmt=22, CountStmtDecl=12, CountStmtExe=19, MaxCyclomatic=8, MaxInheritanceTree=3, MaxNesting=1, SumCyclomatic=9
---

**Row 1,305**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere/modular_cohere.py`
- class_name: `transformers.models.cohere.modular_cohere.CohereLayerNorm`

human_written_code:

```python
import torch
from torch import nn


class CohereLayerNorm(nn.Module):

    def __init__(self, hidden_size=None, eps=1e-05, bias=False):
        """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
        hidden_states = self.weight.to(torch.float32) * hidden_states
        return hidden_states.to(input_dtype)
```

class_skeleton:

```python
class CohereLayerNorm(nn.Module):

    def __init__(self, hidden_size=None, eps=1e-05, bias=False):
        '''The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim'''
        pass

    def forward(self, hidden_states):
        pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=7, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.08, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=15, CountLineBlank=1, CountLineCode=13, CountLineCodeDecl=8, CountLineCodeExe=10, CountLineComment=1, CountStmt=13, CountStmtDecl=8, CountStmtExe=10, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
---

**Row 1,306**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere/modular_cohere.py`
- class_name: `transformers.models.cohere.modular_cohere.CohereMLP`

human_written_code:

```python
from ..llama.modeling_llama import LlamaAttention, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaRotaryEmbedding, eager_attention_forward
from torch import nn


class CohereMLP(LlamaMLP):

    def __init__(self, config):
        super().__init__(config)
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
```

class_skeleton:

```python
class CohereMLP(LlamaMLP):

    def __init__(self, config):
        pass
```

Metrics: total_program_units=2, total_doc_str=0, AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=5, CountDeclMethod=1, CountDeclMethodAll=13, CountLine=6, CountLineBlank=0, CountLineCode=6, CountLineCodeDecl=5, CountLineCodeExe=4, CountLineComment=0, CountStmt=6, CountStmtDecl=5, CountStmtExe=4, MaxCyclomatic=1, MaxInheritanceTree=2, MaxNesting=0, SumCyclomatic=1
---

**Row 1,307**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere/modular_cohere.py`
- class_name: `transformers.models.cohere.modular_cohere.CohereModel`

human_written_code:

```python
from ..llama.modeling_llama import LlamaAttention, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaRotaryEmbedding, eager_attention_forward
from torch import nn
from .configuration_cohere import CohereConfig


class CohereModel(LlamaModel):

    def __init__(self, config: CohereConfig):
        super().__init__(config)
        self.layers = nn.ModuleList([CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.rotary_emb = CohereRotaryEmbedding(config=config)
        self.norm = CohereLayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps)
```

class_skeleton:

```python
class CohereModel(LlamaModel):

    def __init__(self, config: CohereConfig):
        pass
```

Metrics: total_program_units=2, total_doc_str=0, AvgCountLine=7, AvgCountLineBlank=0, AvgCountLineCode=7, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=3, CountDeclMethod=1, CountDeclMethodAll=8, CountLine=8, CountLineBlank=0, CountLineCode=8, CountLineCodeDecl=5, CountLineCodeExe=6, CountLineComment=0, CountStmt=6, CountStmtDecl=5, CountStmtExe=4, MaxCyclomatic=1, MaxInheritanceTree=3, MaxNesting=0, SumCyclomatic=1
---

**Row 1,308**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere/modular_cohere.py`
- class_name: `transformers.models.cohere.modular_cohere.CohereRotaryEmbedding`

human_written_code:

```python
import torch
from ..llama.modeling_llama import LlamaAttention, LlamaForCausalLM, LlamaMLP, LlamaModel, LlamaRotaryEmbedding, eager_attention_forward
from ...modeling_rope_utils import dynamic_rope_update


class CohereRotaryEmbedding(LlamaRotaryEmbedding):

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.repeat_interleave(freqs, 2, dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling
        return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
```

class_skeleton:

```python
class CohereRotaryEmbedding(LlamaRotaryEmbedding):

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        pass
```

Metrics: total_program_units=4, total_doc_str=0, AvgCountLine=21, AvgCountLineBlank=3, AvgCountLineCode=15, AvgCountLineComment=4, AvgCyclomatic=3, CommentToCodeRatio=0.24, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=14, CountLine=23, CountLineBlank=3, CountLineCode=17, CountLineCodeDecl=10, CountLineCodeExe=14, CountLineComment=4, CountStmt=16, CountStmtDecl=9, CountStmtExe=14, MaxCyclomatic=3, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=3
---

**Row 1,309**

- repository_name: `huggingface/pytorch-pretrained-BERT`
- file_path: `huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere/tokenization_cohere_fast.py`
- class_name: `transformers.models.cohere.tokenization_cohere_fast.CohereTokenizerFast`

human_written_code:

````python
from ...tokenization_utils_fast import PreTrainedTokenizerFast from typing import Literal, Union from tokenizers import processors import pickle from ...tokenization_utils_base import BatchEncoding class CohereTokenizerFast(PreTrainedTokenizerFast): """ Construct a Cohere tokenizer. Based on byte-level Byte-Pair-Encoding. This uses notably ByteFallback and NFC normalization. ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01") >>> tokenizer.encode("Hello this is a test") [5, 28339, 2075, 1801, 1671, 3282] ``` If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the values of the first token and final token of an encoded sequence will not be correct). For more details, checkout [post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation. You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): Path to the vocabulary file. merges_file (`str`, *optional*): Path to the merges file. tokenizer_file (`str`, *optional*): [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<UNK>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<BOS_TOKEN>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<|END_OF_TURN_TOKEN|>"`): The end of sequence token. add_bos_token (`bool`, *optional*, defaults to `True`): Whether or not to add an `bos_token` at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether or not to add an `eos_token` at the end of sequences. use_default_system_prompt (`bool`, *optional*, defaults to `False`): Whether or not the default system prompt for Cohere tokenizer should be used. 
add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not the tokenizer should automatically add a prefix space """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP padding_side = 'left' model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = None def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token='<UNK>', bos_token='<BOS_TOKEN>', eos_token='<|END_OF_TURN_TOKEN|>', add_bos_token=True, add_eos_token=False, use_default_system_prompt=False, add_prefix_space=False, **kwargs): super().__init__(vocab_file=vocab_file, merges_file=merges_file, tokenizer_file=tokenizer_file, clean_up_tokenization_spaces=clean_up_tokenization_spaces, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, use_default_system_prompt=use_default_system_prompt, add_prefix_space=add_prefix_space, **kwargs) self._add_bos_token = add_bos_token self._add_eos_token = add_eos_token self.update_post_processor() self.use_default_system_prompt = use_default_system_prompt self.vocab_file = vocab_file self.grounded_generation_template = kwargs.pop('grounded_generation_template', None) self.tool_use_template = kwargs.pop('tool_use_template', None) pre_tok_state = pickle.dumps(self.backend_tokenizer.pre_tokenizer) decoder_state = pickle.dumps(self.backend_tokenizer.decoder) if add_prefix_space: pre_tok_state = pre_tok_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true') decoder_state = decoder_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true') self.backend_tokenizer.pre_tokenizer = pickle.loads(pre_tok_state) self.backend_tokenizer.decoder = pickle.loads(decoder_state) self.add_prefix_space = add_prefix_space def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) if not (self.add_prefix_space or not is_split_into_words): raise Exception(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.') return super()._batch_encode_plus(*args, **kwargs) def _encode_plus(self, *args, **kwargs) -> BatchEncoding: is_split_into_words = kwargs.get('is_split_into_words', False) if not (self.add_prefix_space or not is_split_into_words): raise Exception(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.') return super()._encode_plus(*args, **kwargs) def update_post_processor(self): """ Updates the underlying post processor with the current `bos_token` and `eos_token`. 
""" bos = self.bos_token bos_token_id = self.bos_token_id if bos is None and self.add_bos_token: raise ValueError('add_bos_token = True but bos_token = None') eos = self.eos_token eos_token_id = self.eos_token_id if eos is None and self.add_eos_token: raise ValueError('add_eos_token = True but eos_token = None') single = f"{(bos + ':0 ' if self.add_bos_token else '')}$A:0{(' ' + eos + ':0' if self.add_eos_token else '')}" pair = f"{single}{(' ' + bos + ':1' if self.add_bos_token else '')} $B:1{(' ' + eos + ':1' if self.add_eos_token else '')}" special_tokens = [] if self.add_bos_token: special_tokens.append((bos, bos_token_id)) if self.add_eos_token: special_tokens.append((eos, eos_token_id)) self._tokenizer.post_processor = processors.TemplateProcessing(single=single, pair=pair, special_tokens=special_tokens) @property def add_eos_token(self): return self._add_eos_token @property def add_bos_token(self): return self._add_bos_token @add_eos_token.setter def add_eos_token(self, value): self._add_eos_token = value self.update_post_processor() @add_bos_token.setter def add_bos_token(self, value): self._add_bos_token = value self.update_post_processor() def apply_tool_use_template(self, conversation: list[dict[str, str]], tools: list[dict], **kwargs) -> Union[str, list[int]]: '''Create a Command-R tool-use prompt. Once rendered, the prompt instructs the model to generate a list of actions to perform on a set of user supplied tools to help carry out the user's requests. Conceptually, this works in the same way as `apply_chat_format`, but takes an additional `tools` parameter. Converts a chat in the form of a list of dictionaries with `"role"` and `"content"` keys and a list of available tools for the model to use into a prompt string, or a list of token ids. This method will use the tokenizer's `default_tool_use_template` template specified at the class level. You can override the default template using the `tool_use_template` kwarg but the quality of your results may decrease. Args: conversation (list[dict[str, str]]): A list of dicts with "role" and "content" keys, representing the chat history so far. tools (list[Dict]): a list of tools to render into the prompt for the model to choose from. See an example at the bottom of the docstring. The format should be: * name (str): The name of the tool to be called. Valid names contain only the characters a-z, A-Z, 0-9, _ and must not begin with a digit. * description (str): The description of what the tool does, the model uses the description to choose when and how to call the function. * parameter_definitions (list[Dict]): The input parameters of the tool. Accepts a dictionary where the key is the name of the parameter and the value is the parameter spec. Valid parameter names contain only the characters a-z, A-Z, 0-9, _ and must not begin with a digit. Parameter specs are as follows: * description (str): The description of the parameter. * type (str): the type of the parameter - most effective for python builtin data types, such as 'str', 'bool' * required: boolean: Denotes whether the parameter is always present (required) or not. Defaults to not required. add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate the start of an assistant message. This is useful when you want to generate a response from the model. Note that this argument will be passed to the chat template, and so it must be supported in the template for this argument to have any effect. 
tokenize (`bool`, defaults to `True`): Whether to tokenize the output. If `False`, the output will be a string. padding (`bool`, defaults to `False`): Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`. truncation (`bool`, defaults to `False`): Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`. max_length (`int`, *optional*): Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If not specified, the tokenizer's `max_length` attribute will be used as a default. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. return_dict (`bool`, *optional*, defaults to `False`): Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`. **tokenizer_kwargs: Additional kwargs to pass to the tokenizer. Returns: `str`: A rendered prompt string. or if tokenize=True: `list[int]`: A list of token ids representing the tokenized chat so far, including control tokens. This output is ready to pass to the model, either directly or via methods like `generate()`. Examples: ```python >> tokenizer = CohereTokenizerFast.from_pretrained("CohereForAI/c4ai-command-r-v01") >> tools = [ { "name": "internet_search", "description": "Returns a list of relevant document snippets for a textual query retrieved from the internet", "parameter_definitions": { "query": { "description": "Query to search the internet with", "type": "str", "required": True } } }, { "name': "directly_answer", "description": "Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history", "parameter_definitions": {} } ] >> conversation = [ {"role": "user", "content": "Whats the biggest penguin in the world?"} ] >> # render the prompt, ready for user to inspect, or for input into the model: >> prompt = tokenizer.apply_tool_use_template(conversation, tools=tools, tokenize=False, add_generation_prompt=True) >> print(prompt) <BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral. # System Preamble ## Basic Rules You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions. # User Preamble ## Task and Context You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging. ## Style Guide Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling. 
## Available Tools Here is a list of tools that you have available to you: \\`\\`\\`python def internet_search(query: str) -> list[Dict]: """Returns a list of relevant document snippets for a textual query retrieved from the internet Args: query (str): Query to search the internet with """ pass \\`\\`\\` \\`\\`\\`python def directly_answer() -> list[Dict]: """Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history """ pass \\`\\`\\`<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example: \\`\\`\\`json [ { "tool_name": title of the tool in the specification, "parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters } ]\\`\\`\\`<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> ``` >> inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors='pt') >> outputs = model.generate(inputs, max_new_tokens=128) >> print(tokenizer.decode(outputs[0])) Action: ```json [ { "tool_name": "internet_search", "parameters": { "query": "biggest penguin in the world" } } ] ``` ''' return self.apply_chat_template(conversation, chat_template='tool_use', tools=tools, **kwargs) def apply_grounded_generation_template(self, conversation: list[dict[str, str]], documents: list[dict], citation_mode: Literal['fast', 'accurate']='accurate', **kwargs) -> Union[str, list[int]]: """Create a Command-R grounded generation (aka RAG) prompt. Once rendered, the prompt instructs the model to generate a response with citations in, based on supplied documents. Conceptually, this works in the same way as `apply_chat_format`, but takes additional `documents` and parameter `citation_mode` parameters. Converts a list of dictionaries with `"role"` and `"content"` keys and a list of documents for the model to ground its response on into a prompt string, or a list of token ids. This method will use the tokenizer's `grounded_generation_template` template specified at the class level. You can override the default template using the `grounded_generation_template` kwarg but the quality of your results may decrease. Args: conversation (list[dict[str, str]]): A list of dicts with "role" and "content" keys, representing the chat history so far. documents (list[dict[str, str]): A list of dicts, representing documents or tool outputs to ground your generation on. A document is a semistructured dict, with a string to string mapping. Common fields are `url`, `title`, `snippet` etc but should be descriptive of the key. They will get rendered into the prompt. citation_mode: either "accurate" (prompt the model to generate an answer first, then rewrite it with citation spans in) or "fast", where the prompt instructs the model to generate an answer with citations in directly. The former has higher quality citations, the latter requires fewer tokens to be generated. 
add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate the start of an assistant message. This is useful when you want to generate a response from the model. Note that this argument will be passed to the chat template, and so it must be supported in the template for this argument to have any effect. tokenize (`bool`, defaults to `True`): Whether to tokenize the output. If `False`, the output will be a string. padding (`bool`, defaults to `False`): Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`. truncation (`bool`, defaults to `False`): Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`. max_length (`int`, *optional*): Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If not specified, the tokenizer's `max_length` attribute will be used as a default. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. return_dict (`bool`, *optional*, defaults to `False`): Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`. **tokenizer_kwargs: Additional kwargs to pass to the tokenizer. Returns: `str`: A rendered prompt string. or if tokenize=True: `list[int]`: A list of token ids representing the tokenized chat so far, including control tokens. This output is ready to pass to the model, either directly or via methods like `generate()`. Examples: ```python >> tokenizer = CohereTokenizerFast.from_pretrained('CohereForAI/c4ai-command-r-v01') >> # define documents: >> documents = [ { "title": "Tall penguins", "text": "Emperor penguins are the tallest." }, { "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica."} ] >> # define a conversation: >> conversation = [ {"role": "user", "content": "Whats the biggest penguin in the world?"} ] >> # render the prompt, ready for user to inspect, or for input into the model: >> grounded_generation_prompt = tokenizer.apply_grounded_generation_template(conversation, documents=documents, tokenize=False, add_generation_prompt=True) >> print(grounded_generation_prompt) <BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral. ## Basic Rules You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions. # User Preamble ## Task and Context You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging. 
## Style Guide Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results> Document: 0 title: Tall penguins text: Emperor penguins are the tallest. Document: 1 title: Penguin habitats text: Emperor penguins only live in Antarctica. </results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Carefully perform the following instructions, in order, starting each with a new line. Firstly, Decide which of the retrieved documents are relevant to the user's last input by writing 'Relevant Documents:' followed by comma-separated list of document numbers. If none are relevant, you should instead write 'None'. Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user's last input by writing 'Cited Documents:' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write 'None'. Thirdly, Write 'Answer:' followed by a response to the user's last input in high quality natural english. Use the retrieved documents to help you. Do not insert any citations or grounding markup. Finally, Write 'Grounded answer:' followed by a response to the user's last input in high quality natural english. Use the symbols <co: doc> and </co: doc> to indicate when a fact comes from a document in the search result, e.g <co: 0>my fact</co: 0> for a fact from document 0.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>''' ``` >> inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors='pt') >> outputs = model.generate(inputs, max_new_tokens=128) >> print(tokenizer.decode(outputs[0])) Relevant Documents: 0,1 Cited Documents: 0,1 Answer: The Emperor Penguin is the tallest or biggest penguin in the world. It is a bird that lives only in Antarctica and grows to a height of around 122 centimetres. Grounded answer: The <co: 0>Emperor Penguin</co: 0> is the <co: 0>tallest</co: 0> or biggest penguin in the world. It is a bird that <co: 1>lives only in Antarctica</co: 1> and <co: 0>grows to a height of around 122 centimetres.</co: 0> """ return self.apply_chat_template(conversation, chat_template='rag', documents=documents, citation_mode=citation_mode, **kwargs) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output
````

class_skeleton:

````python
class CohereTokenizerFast(PreTrainedTokenizerFast): ''' Construct a Cohere tokenizer. Based on byte-level Byte-Pair-Encoding. This uses notably ByteFallback and NFC normalization. ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01") >>> tokenizer.encode("Hello this is a test") [5, 28339, 2075, 1801, 1671, 3282] ``` If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the values of the first token and final token of an encoded sequence will not be correct). For more details, checkout [post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation. You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. </Tip> This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`, *optional*): Path to the vocabulary file. merges_file (`str`, *optional*): Path to the merges file. tokenizer_file (`str`, *optional*): [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that contains everything needed to load the tokenizer. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<UNK>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<BOS_TOKEN>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<|END_OF_TURN_TOKEN|>"`): The end of sequence token. add_bos_token (`bool`, *optional*, defaults to `True`): Whether or not to add an `bos_token` at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether or not to add an `eos_token` at the end of sequences. use_default_system_prompt (`bool`, *optional*, defaults to `False`): Whether or not the default system prompt for Cohere tokenizer should be used. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not the tokenizer should automatically add a prefix space ''' def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token='<UNK>', bos_token='<BOS_TOKEN>', eos_token='<|END_OF_TURN_TOKEN|>', add_bos_token=True, add_eos_token=False, use_default_system_prompt=False, add_prefix_space=False, **kwargs): pass def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: pass def _encode_plus(self, *args, **kwargs) -> BatchEncoding: pass def update_post_processor(self): ''' Updates the underlying post processor with the current `bos_token` and `eos_token`. 
''' pass @property def add_eos_token(self): pass @property def add_bos_token(self): pass @add_eos_token.setter def add_eos_token(self): pass @add_bos_token.setter def add_bos_token(self): pass def apply_tool_use_template(self, conversation: list[dict[str, str]], tools: list[dict], **kwargs) -> Union[str, list[int]]: '''Create a Command-R tool-use prompt. Once rendered, the prompt instructs the model to generate a list of actions to perform on a set of user supplied tools to help carry out the user's requests. Conceptually, this works in the same way as `apply_chat_format`, but takes an additional `tools` parameter. Converts a chat in the form of a list of dictionaries with `"role"` and `"content"` keys and a list of available tools for the model to use into a prompt string, or a list of token ids. This method will use the tokenizer's `default_tool_use_template` template specified at the class level. You can override the default template using the `tool_use_template` kwarg but the quality of your results may decrease. Args: conversation (list[dict[str, str]]): A list of dicts with "role" and "content" keys, representing the chat history so far. tools (list[Dict]): a list of tools to render into the prompt for the model to choose from. See an example at the bottom of the docstring. The format should be: * name (str): The name of the tool to be called. Valid names contain only the characters a-z, A-Z, 0-9, _ and must not begin with a digit. * description (str): The description of what the tool does, the model uses the description to choose when and how to call the function. * parameter_definitions (list[Dict]): The input parameters of the tool. Accepts a dictionary where the key is the name of the parameter and the value is the parameter spec. Valid parameter names contain only the characters a-z, A-Z, 0-9, _ and must not begin with a digit. Parameter specs are as follows: * description (str): The description of the parameter. * type (str): the type of the parameter - most effective for python builtin data types, such as 'str', 'bool' * required: boolean: Denotes whether the parameter is always present (required) or not. Defaults to not required. add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate the start of an assistant message. This is useful when you want to generate a response from the model. Note that this argument will be passed to the chat template, and so it must be supported in the template for this argument to have any effect. tokenize (`bool`, defaults to `True`): Whether to tokenize the output. If `False`, the output will be a string. padding (`bool`, defaults to `False`): Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`. truncation (`bool`, defaults to `False`): Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`. max_length (`int`, *optional*): Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If not specified, the tokenizer's `max_length` attribute will be used as a default. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. return_dict (`bool`, *optional*, defaults to `False`): Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`. 
**tokenizer_kwargs: Additional kwargs to pass to the tokenizer. Returns: `str`: A rendered prompt string. or if tokenize=True: `list[int]`: A list of token ids representing the tokenized chat so far, including control tokens. This output is ready to pass to the model, either directly or via methods like `generate()`. Examples: ```python >> tokenizer = CohereTokenizerFast.from_pretrained("CohereForAI/c4ai-command-r-v01") >> tools = [ { "name": "internet_search", "description": "Returns a list of relevant document snippets for a textual query retrieved from the internet", "parameter_definitions": { "query": { "description": "Query to search the internet with", "type": "str", "required": True } } }, { "name': "directly_answer", "description": "Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history", "parameter_definitions": {} } ] >> conversation = [ {"role": "user", "content": "Whats the biggest penguin in the world?"} ] >> # render the prompt, ready for user to inspect, or for input into the model: >> prompt = tokenizer.apply_tool_use_template(conversation, tools=tools, tokenize=False, add_generation_prompt=True) >> print(prompt) <BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral. # System Preamble ## Basic Rules You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions. # User Preamble ## Task and Context You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging. ## Style Guide Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling. ## Available Tools Here is a list of tools that you have available to you: \`\`\`python def internet_search(query: str) -> list[Dict]: """Returns a list of relevant document snippets for a textual query retrieved from the internet Args: query (str): Query to search the internet with """ pass \`\`\` \`\`\`python def directly_answer() -> list[Dict]: """Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history """ pass \`\`\`<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. 
The list of actions you want to call should be formatted as a list of json objects, for example: \`\`\`json [ { "tool_name": title of the tool in the specification, "parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters } ]\`\`\`<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> ``` >> inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors='pt') >> outputs = model.generate(inputs, max_new_tokens=128) >> print(tokenizer.decode(outputs[0])) Action: ```json [ { "tool_name": "internet_search", "parameters": { "query": "biggest penguin in the world" } } ] ``` ''' pass def apply_grounded_generation_template(self, conversation: list[dict[str, str]], documents: list[dict], citation_mode: Literal['fast', 'accurate']='accurate', **kwargs) -> Union[str, list[int]]: '''Create a Command-R grounded generation (aka RAG) prompt. Once rendered, the prompt instructs the model to generate a response with citations in, based on supplied documents. Conceptually, this works in the same way as `apply_chat_format`, but takes additional `documents` and parameter `citation_mode` parameters. Converts a list of dictionaries with `"role"` and `"content"` keys and a list of documents for the model to ground its response on into a prompt string, or a list of token ids. This method will use the tokenizer's `grounded_generation_template` template specified at the class level. You can override the default template using the `grounded_generation_template` kwarg but the quality of your results may decrease. Args: conversation (list[dict[str, str]]): A list of dicts with "role" and "content" keys, representing the chat history so far. documents (list[dict[str, str]): A list of dicts, representing documents or tool outputs to ground your generation on. A document is a semistructured dict, with a string to string mapping. Common fields are `url`, `title`, `snippet` etc but should be descriptive of the key. They will get rendered into the prompt. citation_mode: either "accurate" (prompt the model to generate an answer first, then rewrite it with citation spans in) or "fast", where the prompt instructs the model to generate an answer with citations in directly. The former has higher quality citations, the latter requires fewer tokens to be generated. add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate the start of an assistant message. This is useful when you want to generate a response from the model. Note that this argument will be passed to the chat template, and so it must be supported in the template for this argument to have any effect. tokenize (`bool`, defaults to `True`): Whether to tokenize the output. If `False`, the output will be a string. padding (`bool`, defaults to `False`): Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`. truncation (`bool`, defaults to `False`): Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`. max_length (`int`, *optional*): Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If not specified, the tokenizer's `max_length` attribute will be used as a default. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. 
return_dict (`bool`, *optional*, defaults to `False`): Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`. **tokenizer_kwargs: Additional kwargs to pass to the tokenizer. Returns: `str`: A rendered prompt string. or if tokenize=True: `list[int]`: A list of token ids representing the tokenized chat so far, including control tokens. This output is ready to pass to the model, either directly or via methods like `generate()`. Examples: ```python >> tokenizer = CohereTokenizerFast.from_pretrained('CohereForAI/c4ai-command-r-v01') >> # define documents: >> documents = [ { "title": "Tall penguins", "text": "Emperor penguins are the tallest." }, { "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica."} ] >> # define a conversation: >> conversation = [ {"role": "user", "content": "Whats the biggest penguin in the world?"} ] >> # render the prompt, ready for user to inspect, or for input into the model: >> grounded_generation_prompt = tokenizer.apply_grounded_generation_template(conversation, documents=documents, tokenize=False, add_generation_prompt=True) >> print(grounded_generation_prompt) <BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral. ## Basic Rules You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions. # User Preamble ## Task and Context You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging. ## Style Guide Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results> Document: 0 title: Tall penguins text: Emperor penguins are the tallest. Document: 1 title: Penguin habitats text: Emperor penguins only live in Antarctica. </results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Carefully perform the following instructions, in order, starting each with a new line. Firstly, Decide which of the retrieved documents are relevant to the user's last input by writing 'Relevant Documents:' followed by comma-separated list of document numbers. If none are relevant, you should instead write 'None'. Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user's last input by writing 'Cited Documents:' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write 'None'. Thirdly, Write 'Answer:' followed by a response to the user's last input in high quality natural english. 
Use the retrieved documents to help you. Do not insert any citations or grounding markup. Finally, Write 'Grounded answer:' followed by a response to the user's last input in high quality natural english. Use the symbols <co: doc> and </co: doc> to indicate when a fact comes from a document in the search result, e.g <co: 0>my fact</co: 0> for a fact from document 0.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> ``` >> inputs = tokenizer.encode(grounded_generation_prompt, add_special_tokens=False, return_tensors='pt') >> outputs = model.generate(inputs, max_new_tokens=128) >> print(tokenizer.decode(outputs[0])) Relevant Documents: 0,1 Cited Documents: 0,1 Answer: The Emperor Penguin is the tallest or biggest penguin in the world. It is a bird that lives only in Antarctica and grows to a height of around 122 centimetres. Grounded answer: The <co: 0>Emperor Penguin</co: 0> is the <co: 0>tallest</co: 0> or biggest penguin in the world. It is a bird that <co: 1>lives only in Antarctica</co: 1> and <co: 0>grows to a height of around 122 centimetres.</co: 0> ''' pass def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): pass
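The `citation_mode` argument described above only changes the instruction block appended to the rendered prompt. A minimal sketch of how one might render both modes side by side for comparison — the checkpoint name is taken from the examples above, and the comparison itself is illustrative, not part of the tokenizer API:

```python
# Sketch: render the same grounded-generation prompt under both citation modes.
# Assumes the CohereForAI/c4ai-command-r-v01 checkpoint from the examples above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
conversation = [{"role": "user", "content": "What's the biggest penguin in the world?"}]
documents = [{"title": "Tall penguins", "text": "Emperor penguins are the tallest."}]

prompts = {
    mode: tokenizer.apply_grounded_generation_template(
        conversation,
        documents=documents,
        citation_mode=mode,
        tokenize=False,
        add_generation_prompt=True,
    )
    for mode in ("accurate", "fast")
}
# "accurate" asks for a draft answer plus a grounded rewrite; "fast" asks for a
# single grounded answer, so its instruction block (and the generation) is shorter.
print(len(prompts["accurate"]), len(prompts["fast"]))
```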
16
4
35
3
11
20
2
2.05
1
6
1
0
11
7
11
99
463
60
132
67
91
271
71
38
59
9
3
1
25
1,310
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modeling_cohere2.py
transformers.models.cohere2.modeling_cohere2.Cohere2Attention
from ...processing_utils import Unpack from ...cache_utils import Cache, DynamicCache import torch from typing import Callable, Optional, Union import torch.nn as nn from .configuration_cohere2 import Cohere2Config from ...utils.deprecation import deprecate_kwarg from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...modeling_flash_attention_utils import FlashAttentionKwargs class Cohere2Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Cohere2Config, layer_idx: Optional[int]=None): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim ** (-0.5) self.attention_dropout = config.attention_dropout self.is_causal = True self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == 'sliding_attention' else None self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings if self.sliding_window is not None: query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=self.sliding_window, **kwargs) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return (attn_output, attn_weights)
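When `config._attn_implementation` is `"eager"`, the forward above dispatches to `eager_attention_forward`, a module-level helper defined alongside this class (not shown in this snippet). A minimal sketch of the computation such a helper performs, assuming the key/value heads have already been expanded across the GQA groups:

```python
import torch
import torch.nn.functional as F

def eager_attention_sketch(q, k, v, mask=None, scaling=1.0, dropout=0.0):
    # q, k, v: (batch, num_heads, seq_len, head_dim); mask broadcastable to
    # (batch, num_heads, q_len, k_len). KV heads are assumed already repeated
    # across the GQA groups (the real helper does this itself).
    scores = torch.matmul(q, k.transpose(2, 3)) * scaling
    if mask is not None:
        scores = scores + mask
    weights = F.softmax(scores, dim=-1, dtype=torch.float32).to(q.dtype)
    weights = F.dropout(weights, p=dropout)
    out = torch.matmul(weights, v).transpose(1, 2).contiguous()  # (b, q_len, h, d)
    return out, weights
```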
class Cohere2Attention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, config: Cohere2Config, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: pass
4
1
42
4
38
1
5
0.03
1
6
3
0
2
12
2
12
88
10
76
33
65
2
39
25
36
7
1
2
9
1,311
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modeling_cohere2.py
transformers.models.cohere2.modeling_cohere2.Cohere2DecoderLayer
from ...processing_utils import Unpack from typing import Callable, Optional, Union from ...modeling_layers import GradientCheckpointingLayer import torch.nn as nn from ...utils.deprecation import deprecate_kwarg import torch from ...modeling_flash_attention_utils import FlashAttentionKwargs from .configuration_cohere2 import Cohere2Config from ...cache_utils import Cache, DynamicCache class Cohere2DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Cohere2Config, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Cohere2Attention(config=config, layer_idx=layer_idx) self.mlp = Cohere2MLP(config) self.input_layernorm = Cohere2LayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps) self.attention_type = config.layer_types[layer_idx] @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. past_key_values (`Cache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states_attention, _ = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states_mlp = self.mlp(hidden_states) hidden_states = residual + hidden_states_attention + hidden_states_mlp return hidden_states
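Note the parallel-residual layout in this forward: a single `input_layernorm` feeds both the attention and the MLP, and both outputs are added to the same residual, instead of the usual sequential pre-norm arrangement. A toy sketch of the two patterns with stand-in callables:

```python
# Sketch of the parallel-residual pattern used above, with stand-in callables.
def sequential_block(x, ln1, attn, ln2, mlp):
    x = x + attn(ln1(x))         # typical pre-norm transformer: attention first...
    return x + mlp(ln2(x))       # ...then MLP, each with its own layernorm

def parallel_block(x, ln, attn, mlp):
    h = ln(x)                    # one shared layernorm
    return x + attn(h) + mlp(h)  # attention and MLP read the same input
```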
class Cohere2DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Cohere2Config, layer_idx: int): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. past_key_values (`Cache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. ''' pass
4
1
46
5
26
16
3
0.6
1
10
6
0
2
7
2
12
94
10
53
29
39
32
30
18
27
4
1
2
5
1,312
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modeling_cohere2.py
transformers.models.cohere2.modeling_cohere2.Cohere2ForCausalLM
from ...cache_utils import Cache, DynamicCache import torch from ...generation import GenerationMixin from ...processing_utils import Unpack from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from typing import Callable, Optional, Union import torch.nn as nn from ...utils import TransformersKwargs, auto_docstring, can_return_tuple @auto_docstring class Cohere2ForCausalLM(Cohere2PreTrainedModel, GenerationMixin): _tied_weights_keys = ['lm_head.weight'] _tp_plan = {'lm_head': 'colwise_rep'} _pp_plan = {'lm_head': (['hidden_states'], ['logits'])} def __init__(self, config): super().__init__(config) self.model = Cohere2Model(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.logit_scale = config.logit_scale self.tie_word_embeddings = config.tie_word_embeddings self.post_init() @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >> from transformers import AutoTokenizer, Cohere2ForCausalLM >> model = Cohere2ForCausalLM.from_pretrained("Cohere2ForAI/c4ai-command-r-v01") >> tokenizer = AutoTokenizer.from_pretrained("Cohere2ForAI/c4ai-command-r-v01") >> prompt = "Hey, are you conscious? Can you talk to me?" >> inputs = tokenizer(prompt, return_tensors="pt") >> # Generate >> generate_ids = model.generate(inputs.input_ids, max_length=30) >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs) hidden_states = outputs.last_hidden_state slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) logits = logits * self.logit_scale loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
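The one Cohere-specific detail in this head is the multiplicative `logit_scale` (0.0625, i.e. 1/16, by default) applied after the `lm_head` projection. A toy sketch with illustrative, hypothetical sizes:

```python
import torch
import torch.nn as nn

hidden_size, vocab_size, logit_scale = 64, 100, 0.0625  # illustrative values
lm_head = nn.Linear(hidden_size, vocab_size, bias=False)
hidden = torch.randn(2, 5, hidden_size)                 # (batch, seq, hidden)

logits = lm_head(hidden) * logit_scale  # same order of operations as forward() above
print(logits.shape)                     # torch.Size([2, 5, 100])
```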
@auto_docstring class Cohere2ForCausalLM(Cohere2PreTrainedModel, GenerationMixin): def __init__(self, config): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >> from transformers import AutoTokenizer, Cohere2ForCausalLM >> model = Cohere2ForCausalLM.from_pretrained("Cohere2ForAI/c4ai-command-r-v01") >> tokenizer = AutoTokenizer.from_pretrained("Cohere2ForAI/c4ai-command-r-v01") >> prompt = "Hey, are you conscious? Can you talk to me?" >> inputs = tokenizer(prompt, return_tensors="pt") >> # Generate >> generate_ids = model.generate(inputs.input_ids, max_length=30) >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```''' pass
6
1
22
2
15
5
3
0.34
2
11
6
0
9
5
9
10
213
29
140
53
101
48
65
26
55
11
2
2
26
1,313
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modeling_cohere2.py
transformers.models.cohere2.modeling_cohere2.Cohere2LayerNorm
import torch.nn as nn import torch class Cohere2LayerNorm(nn.Module): def __init__(self, hidden_size=None, eps=1e-05, bias=False): """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim""" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) mean = hidden_states.mean(-1, keepdim=True) variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True) hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon) hidden_states = self.weight.to(torch.float32) * hidden_states return hidden_states.to(input_dtype)
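This is a bias-free LayerNorm computed in float32. Up to dtype handling it should agree with `torch.nn.functional.layer_norm` given a weight and no bias — a quick sanity-check sketch:

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 8)
weight = torch.ones(8)

# Manual computation, mirroring forward() above (eps matches the 1e-05 default).
mean = x.mean(-1, keepdim=True)
var = (x - mean).pow(2).mean(-1, keepdim=True)
manual = weight * (x - mean) * torch.rsqrt(var + 1e-5)

reference = F.layer_norm(x, (8,), weight=weight, bias=None, eps=1e-5)
print(torch.allclose(manual, reference, atol=1e-6))  # True
```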
class Cohere2LayerNorm(nn.Module): def __init__(self, hidden_size=None, eps=1e-05, bias=False): '''The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim''' pass def forward(self, hidden_states): pass
3
1
7
0
6
1
1
0.08
1
1
0
0
2
2
2
12
15
1
13
8
10
1
13
8
10
1
1
0
2
1,314
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modeling_cohere2.py
transformers.models.cohere2.modeling_cohere2.Cohere2MLP
from ...activations import ACT2FN import torch.nn as nn class Cohere2MLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj
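The MLP is the gated (SwiGLU-style) variant: the activation is applied only to the gate projection, multiplied elementwise with the up projection, then projected back down. A toy sketch with hypothetical sizes, assuming `hidden_act="silu"`:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

hidden, inter = 16, 32                       # illustrative sizes
gate = nn.Linear(hidden, inter, bias=False)  # gated branch
up = nn.Linear(hidden, inter, bias=False)    # linear branch
down = nn.Linear(inter, hidden, bias=False)  # projection back to hidden size

x = torch.randn(3, hidden)
y = down(F.silu(gate(x)) * up(x))  # matches forward() above with hidden_act="silu"
print(y.shape)                     # torch.Size([3, 16])
```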
class Cohere2MLP(nn.Module): def __init__(self, config): pass def forward(self, x): pass
3
0
6
0
6
0
1
0
1
1
0
0
2
7
2
12
14
1
13
11
10
0
13
11
10
1
1
0
2
1,315
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modeling_cohere2.py
transformers.models.cohere2.modeling_cohere2.Cohere2Model
from .configuration_cohere2 import Cohere2Config from ...utils import TransformersKwargs, auto_docstring, can_return_tuple from ...processing_utils import Unpack from ...utils.generic import check_model_inputs from typing import Callable, Optional, Union from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask import torch.nn as nn import torch from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...cache_utils import Cache, DynamicCache @auto_docstring class Cohere2Model(Cohere2PreTrainedModel): def __init__(self, config: Cohere2Config): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList([Cohere2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]) self.norm = Cohere2LayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps) self.rotary_emb = Cohere2RotaryEmbedding(config=config) self.gradient_checkpointing = False self.post_init() @check_model_inputs @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You must specify exactly one of input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None and (not self.training): past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) if not isinstance((causal_mask_mapping := attention_mask), dict): mask_kwargs = {'config': self.config, 'input_embeds': inputs_embeds, 'attention_mask': attention_mask, 'cache_position': cache_position, 'past_key_values': past_key_values, 'position_ids': position_ids} causal_mask_mapping = {'full_attention': create_causal_mask(**mask_kwargs), 'sliding_attention': create_sliding_window_causal_mask(**mask_kwargs)} hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers: hidden_states = decoder_layer(hidden_states, position_embeddings=position_embeddings, attention_mask=causal_mask_mapping[decoder_layer.attention_type], past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
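Because the layers alternate between sliding-window and full attention, the model builds one causal mask per attention type up front, and each layer selects its own via `decoder_layer.attention_type`. A schematic sketch of that dispatch, with stand-in strings in place of the real masks from `masking_utils`:

```python
# Sketch: one mask per attention type, selected per layer. Stand-in strings
# replace the real tensors built by create_causal_mask and
# create_sliding_window_causal_mask.
causal_mask_mapping = {
    "full_attention": "full causal mask",
    "sliding_attention": "sliding-window causal mask",
}
layer_types = ["sliding_attention"] * 3 + ["full_attention"]  # default pattern of 4
for layer_idx, attention_type in enumerate(layer_types):
    mask = causal_mask_mapping[attention_type]  # what each decoder layer receives
    print(layer_idx, mask)
```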
@auto_docstring class Cohere2Model(Cohere2PreTrainedModel): def __init__(self, config: Cohere2Config): pass @check_model_inputs @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast: pass
6
0
38
4
28
6
6
0.24
1
13
7
0
5
8
6
7
242
28
174
66
134
41
84
32
77
23
2
2
34
1,316
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modeling_cohere2.py
transformers.models.cohere2.modeling_cohere2.Cohere2PreTrainedModel
from .configuration_cohere2 import Cohere2Config from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...utils import TransformersKwargs, auto_docstring, can_return_tuple @auto_docstring class Cohere2PreTrainedModel(PreTrainedModel): config: Cohere2Config base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['Cohere2DecoderLayer'] _skip_keys_device_placement = ['past_key_values'] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = True _supports_attention_backend = True _can_record_outputs = {'hidden_states': Cohere2DecoderLayer, 'attentions': Cohere2Attention}
@auto_docstring class Cohere2PreTrainedModel(PreTrainedModel): pass
2
0
10
0
10
0
5
0
1
0
0
2
1
0
1
1
24
1
23
15
21
0
22
15
20
5
1
2
5
1,317
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modeling_cohere2.py
transformers.models.cohere2.modeling_cohere2.Cohere2RotaryEmbedding
from .configuration_cohere2 import Cohere2Config import torch import torch.nn as nn from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update class Cohere2RotaryEmbedding(nn.Module): inv_freq: torch.Tensor def __init__(self, config: Cohere2Config, device=None): super().__init__() if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict): self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type')) else: self.rope_type = 'default' self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer('inv_freq', inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu' with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.repeat_interleave(freqs, 2, dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
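Note the `torch.repeat_interleave(freqs, 2, dim=-1)`: unlike the common `torch.cat((freqs, freqs), dim=-1)` layout, Cohere's rotary embedding interleaves each frequency pairwise, so the matching rotation pairs adjacent dimensions. A small sketch of the table this produces, with illustrative sizes:

```python
import torch

head_dim, base = 8, 10000.0  # illustrative values
inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))
position_ids = torch.arange(4).float()[None, :]  # (batch=1, seq=4)

freqs = position_ids[..., None] * inv_freq       # (1, 4, head_dim // 2)
emb = torch.repeat_interleave(freqs, 2, dim=-1)  # (1, 4, head_dim)
# Dimensions come out as [f0, f0, f1, f1, f2, f2, f3, f3] — pairwise interleaved,
# not [f0, f1, f2, f3, f0, f1, f2, f3] as with torch.cat((freqs, freqs), -1).
print(emb.cos().shape, emb[0, 1])
```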
class Cohere2RotaryEmbedding(nn.Module): def __init__(self, config: Cohere2Config, device=None): pass @torch.no_grad() @dynamic_rope_update def forward(self, x, position_ids): pass
5
0
18
2
13
5
3
0.38
1
4
1
0
3
7
3
13
59
8
40
21
35
15
38
20
34
3
1
1
8
1,318
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modular_cohere2.py
transformers.models.cohere2.modular_cohere2.Cohere2Attention
from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_utils import ALL_ATTENTION_FUNCTIONS import torch from ..cohere.modeling_cohere import CohereAttention, CohereDecoderLayer, CohereForCausalLM, CohereLayerNorm, CoherePreTrainedModel, CohereRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward from ...cache_utils import Cache, DynamicCache from ...utils.deprecation import deprecate_kwarg from ...processing_utils import Unpack import torch.nn as nn from typing import Callable, Optional class Cohere2Attention(CohereAttention): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Cohere2Config, layer_idx: Optional[int]=None): nn.Module.__init__(self) self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim ** (-0.5) self.attention_dropout = config.attention_dropout self.is_causal = True self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == 'sliding_attention' else None self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings if self.sliding_window is not None: query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=self.sliding_window, **kwargs) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return (attn_output, attn_weights)
class Cohere2Attention(CohereAttention): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, config: Cohere2Config, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: pass
4
1
42
4
38
1
5
0.03
2
5
3
0
2
12
2
14
88
10
76
33
65
2
39
25
36
7
2
2
9
1,319
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modular_cohere2.py
transformers.models.cohere2.modular_cohere2.Cohere2Config
from ...configuration_utils import PretrainedConfig, layer_type_validation from ...modeling_rope_utils import rope_config_validation class Cohere2Config(PretrainedConfig): """ This is the configuration class to store the configuration of a [`CohereModel`]. It is used to instantiate a Cohere model according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will yield a similar configuration to that of the [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01) model. Args: vocab_size (`int`, *optional*, defaults to 256000): Vocabulary size of the Cohere model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`CohereModel`] hidden_size (`int`, *optional*, defaults to 8192): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 22528): Dimension of the MLP representations. logit_scale (`float`, *optional*, defaults to 0.0625): The scaling factor for the output logits. num_hidden_layers (`int`, *optional*, defaults to 40): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 64): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. bos_token_id (`int`, *optional*, defaults to 5): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 255001): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie weight embeddings. rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly. 
Expected contents: `rope_type` (`str`): The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during pretraining. `attention_factor` (`float`, *optional*): Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified, it defaults to the value recommended by the implementation, using the `factor` field to infer the suggested value. `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function. If unspecified, it defaults to 1. `short_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `long_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to long contexts (> `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `low_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE `high_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. sliding_window (`int`, *optional*, defaults to 4096): Size of the sliding window attention context. layer_types (`list`, *optional*): Attention pattern for each layer. 
```python >>> from transformers import Cohere2Model, Cohere2Config >>> # Initializing a Cohere Next model configuration >>> configuration = Cohere2Config() >>> # Initializing a model from the Cohere2 configuration >>> model = Cohere2Model(configuration) # doctest: +SKIP >>> # Accessing the model configuration >>> configuration = model.config # doctest: +SKIP ``` """ model_type = 'cohere2' keys_to_ignore_at_inference = ['past_key_values'] base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.gate_proj': 'colwise', 'layers.*.mlp.up_proj': 'colwise', 'layers.*.mlp.down_proj': 'rowwise'} base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])} def __init__(self, vocab_size=256000, hidden_size=8192, intermediate_size=22528, logit_scale=0.0625, num_hidden_layers=40, num_attention_heads=64, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=8192, initializer_range=0.02, layer_norm_eps=1e-05, use_cache=True, pad_token_id=0, bos_token_id=5, eos_token_id=255001, tie_word_embeddings=True, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, sliding_window=4096, layer_types=None, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.logit_scale = logit_scale self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.sliding_window = sliding_window self.layer_types = layer_types self.head_dim = hidden_size // num_attention_heads rope_config_validation(self) super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) self._sliding_window_pattern = kwargs.get('sliding_window_pattern', 4) if self.layer_types is None: self._sliding_window_pattern = getattr(self, 'sliding_window_pattern', 4) self.layer_types = ['sliding_attention' if bool((i + 1) % self._sliding_window_pattern) else 'full_attention' for i in range(self.num_hidden_layers)] layer_type_validation(self.layer_types, self.num_hidden_layers)
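When `layer_types` is not supplied, the `__init__` above derives it from `sliding_window_pattern`: every pattern-th layer becomes a full-attention layer, the rest use sliding-window attention. A quick sketch of the default pattern of 4 over a hypothetical 8-layer model:

```python
# Sketch of the default layer_types derivation in __init__ above.
num_hidden_layers, sliding_window_pattern = 8, 4  # illustrative layer count
layer_types = [
    "sliding_attention" if bool((i + 1) % sliding_window_pattern) else "full_attention"
    for i in range(num_hidden_layers)
]
print(layer_types)
# ['sliding_attention', 'sliding_attention', 'sliding_attention', 'full_attention',
#  'sliding_attention', 'sliding_attention', 'sliding_attention', 'full_attention']
```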
class Cohere2Config(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`CohereModel`]. It is used to instantiate a Cohere model according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will yield a similar configuration to that of the [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01) model. Args: vocab_size (`int`, *optional*, defaults to 256000): Vocabulary size of the Cohere model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`CohereModel`] hidden_size (`int`, *optional*, defaults to 8192): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 22528): Dimension of the MLP representations. logit_scale (`float`, *optional*, defaults to 0.0625): The scaling factor for the output logits. num_hidden_layers (`int`, *optional*, defaults to 40): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 64): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 8192): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. bos_token_id (`int`, *optional*, defaults to 5): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 255001): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `True`): Whether to tie weight embeddings. rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly. Expected contents: `rope_type` (`str`): The sub-variant of RoPE to use. 
Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during pretraining. `attention_factor` (`float`, *optional*): Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified, it defaults to the value recommended by the implementation, using the `factor` field to infer the suggested value. `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function. If unspecified, it defaults to 1. `short_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `long_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to long contexts (> `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `low_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE `high_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. sliding_window (`int`, *optional*, defaults to 4096): Size of the sliding window attention context. layer_types (`list`, *optional*): Attention pattern for each layer. ```python >>> from transformers import Cohere2Model, Cohere2Config >>> # Initializing a Cohere Next model configuration >>> configuration = Cohere2Config() >>> # Initializing a model from the Cohere2 configuration >>> model = Cohere2Model(configuration) # doctest: +SKIP >>> # Accessing the model configuration >>> configuration = model.config # doctest: +SKIP ``` ''' def __init__(self, vocab_size=256000, hidden_size=8192, intermediate_size=22528, logit_scale=0.0625, num_hidden_layers=40, num_attention_heads=64, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=8192, initializer_range=0.02, layer_norm_eps=1e-05, use_cache=True, pad_token_id=0, bos_token_id=5, eos_token_id=255001, tie_word_embeddings=True, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, sliding_window=4096, layer_types=None, **kwargs): pass
2
1
64
4
57
3
2
1.57
1
1
0
0
1
20
1
33
190
13
69
51
41
108
29
25
27
2
2
1
2
1,320
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modular_cohere2.py
transformers.models.cohere2.modular_cohere2.Cohere2DecoderLayer
import torch from ...cache_utils import Cache, DynamicCache from ..cohere.modeling_cohere import CohereAttention, CohereDecoderLayer, CohereForCausalLM, CohereLayerNorm, CoherePreTrainedModel, CohereRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward from ...processing_utils import Unpack from typing import Callable, Optional from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...utils.deprecation import deprecate_kwarg import torch.nn as nn class Cohere2DecoderLayer(CohereDecoderLayer): def __init__(self, config: Cohere2Config, layer_idx: int): super().__init__(config, layer_idx) self.attention_type = config.layer_types[layer_idx] @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states_attention, _ = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states_mlp = self.mlp(hidden_states) hidden_states = residual + hidden_states_attention + hidden_states_mlp return hidden_states
class Cohere2DecoderLayer(CohereDecoderLayer): def __init__(self, config: Cohere2Config, layer_idx: int): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: pass
4
0
45
5
25
16
3
0.64
1
8
4
0
2
4
2
14
91
10
50
26
36
32
27
15
24
4
2
2
5
1,321
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modular_cohere2.py
transformers.models.cohere2.modular_cohere2.Cohere2ForCausalLM
from ..cohere.modeling_cohere import CohereAttention, CohereDecoderLayer, CohereForCausalLM, CohereLayerNorm, CoherePreTrainedModel, CohereRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward class Cohere2ForCausalLM(CohereForCausalLM): pass
class Cohere2ForCausalLM(CohereForCausalLM): pass
1
0
44
4
33
10
6
0.29
1
3
2
0
2
0
2
11
90
8
66
17
52
19
29
6
26
11
3
2
12
1,322
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modular_cohere2.py
transformers.models.cohere2.modular_cohere2.Cohere2LayerNorm
from ..cohere.modeling_cohere import CohereAttention, CohereDecoderLayer, CohereForCausalLM, CohereLayerNorm, CoherePreTrainedModel, CohereRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward class Cohere2LayerNorm(CohereLayerNorm): pass
class Cohere2LayerNorm(CohereLayerNorm): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
12
2
0
2
1
1
0
2
1
1
0
2
0
0
1,323
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modular_cohere2.py
transformers.models.cohere2.modular_cohere2.Cohere2Model
from ...utils import TransformersKwargs, logging from typing import Callable, Optional from ..gemma2.modeling_gemma2 import Gemma2Model import torch.nn as nn from ...cache_utils import Cache, DynamicCache from ...modeling_outputs import BaseModelOutputWithPast from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask import torch from ...processing_utils import Unpack class Cohere2Model(Gemma2Model): def __init__(self, config: Cohere2Config): super().__init__(config) self.norm = Cohere2LayerNorm(hidden_size=config.hidden_size, eps=config.layer_norm_eps) self.rotary_emb = Cohere2RotaryEmbedding(config=config) def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You must specify exactly one of input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None and (not self.training): past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) if not isinstance((causal_mask_mapping := attention_mask), dict): mask_kwargs = {'config': self.config, 'input_embeds': inputs_embeds, 'attention_mask': attention_mask, 'cache_position': cache_position, 'past_key_values': past_key_values, 'position_ids': position_ids} causal_mask_mapping = {'full_attention': create_causal_mask(**mask_kwargs), 'sliding_attention': create_sliding_window_causal_mask(**mask_kwargs)} hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers: hidden_states = decoder_layer(hidden_states, position_embeddings=position_embeddings, attention_mask=causal_mask_mapping[decoder_layer.attention_type], past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
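When generating with `use_cache=True` and no cache supplied, this forward creates a `DynamicCache` and derives `cache_position` (and, if absent, `position_ids`) from the number of tokens already cached. A sketch of that bookkeeping with toy numbers:

```python
import torch

past_seen_tokens = 10  # tokens already in the cache (illustrative)
new_tokens = 1         # decoding one token at a time

cache_position = torch.arange(past_seen_tokens, past_seen_tokens + new_tokens)
position_ids = cache_position.unsqueeze(0)  # default when position_ids is None
print(cache_position, position_ids.shape)   # tensor([10]) torch.Size([1, 1])
```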
class Cohere2Model(Gemma2Model): def __init__(self, config: Cohere2Config): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast: pass
3
0
63
8
51
4
12
0.12
1
11
6
0
2
3
2
9
133
18
103
30
86
12
48
15
45
23
3
2
24
1,324
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modular_cohere2.py
transformers.models.cohere2.modular_cohere2.Cohere2PreTrainedModel
from ..cohere.modeling_cohere import CohereAttention, CohereDecoderLayer, CohereForCausalLM, CohereLayerNorm, CoherePreTrainedModel, CohereRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward class Cohere2PreTrainedModel(CoherePreTrainedModel): config: Cohere2Config
class Cohere2PreTrainedModel(CoherePreTrainedModel): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
2
0
2
2
1
0
2
2
1
0
2
0
0
1,325
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/cohere2/modular_cohere2.py
transformers.models.cohere2.modular_cohere2.Cohere2RotaryEmbedding
from ..cohere.modeling_cohere import CohereAttention, CohereDecoderLayer, CohereForCausalLM, CohereLayerNorm, CoherePreTrainedModel, CohereRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward class Cohere2RotaryEmbedding(CohereRotaryEmbedding): pass
class Cohere2RotaryEmbedding(CohereRotaryEmbedding): pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
13
2
0
2
1
1
0
2
1
1
0
2
0
0
1,326
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/colpali/configuration_colpali.py
transformers.models.colpali.configuration_colpali.ColPaliConfig
from ..auto import CONFIG_MAPPING, AutoConfig from copy import deepcopy from ...configuration_utils import PretrainedConfig class ColPaliConfig(PretrainedConfig): """ Configuration class to store the configuration of a [`ColPaliForRetrieval`]. It is used to instantiate an instance of `ColPaliForRetrieval` according to the specified arguments, defining the model architecture following the methodology from the "ColPali: Efficient Document Retrieval with Vision Language Models" paper. Creating a configuration with the default settings will result in a configuration where the VLM backbone is set to the default PaliGemma configuration, i.e the one from [vidore/colpali-v1.2](https://huggingface.co/vidore/colpali-v1.2). Note that contrarily to what the class name suggests (actually the name refers to the ColPali **methodology**), you can use a different VLM backbone model than PaliGemma by passing the corresponding VLM configuration to the class constructor. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vlm_config (`PretrainedConfig`, *optional*): Configuration of the VLM backbone model. text_config (`PretrainedConfig`, *optional*): Configuration of the text backbone model. Overrides the `text_config` attribute of the `vlm_config` if provided. embedding_dim (`int`, *optional*, defaults to 128): Dimension of the multi-vector embeddings produced by the model. Example: ```python from transformers.models.colpali import ColPaliConfig, ColPaliForRetrieval config = ColPaliConfig() model = ColPaliForRetrieval(config) ``` """ model_type = 'colpali' sub_configs = {'vlm_config': PretrainedConfig, 'text_config': AutoConfig} def __init__(self, vlm_config=None, text_config=None, embedding_dim: int=128, **kwargs): if vlm_config is None: vlm_config = CONFIG_MAPPING['paligemma']() logger.info('`vlm_config` is `None`. Initializing `vlm_config` with the `PaliGemmaConfig` with default values.') elif isinstance(vlm_config, dict): vlm_config = deepcopy(vlm_config) if 'model_type' not in vlm_config: raise KeyError('The `model_type` key is missing in the `vlm_config` dictionary. Please provide the model type.') elif vlm_config['model_type'] not in CONFIG_MAPPING: raise ValueError(f"The model type `{vlm_config['model_type']}` is not supported. Please provide a valid model type.") vlm_config = CONFIG_MAPPING[vlm_config['model_type']](**vlm_config) elif isinstance(vlm_config, PretrainedConfig): vlm_config = vlm_config else: raise TypeError(f'Invalid type for `vlm_config`. Expected `PretrainedConfig`, `dict`, or `None`, but got {type(vlm_config)}.') self.vlm_config = vlm_config self.text_config = text_config if text_config is not None else vlm_config.text_config if isinstance(self.text_config, dict): text_config['model_type'] = text_config.get('model_type', 'gemma') self.text_config = CONFIG_MAPPING[text_config['model_type']](**text_config) self.embedding_dim = embedding_dim super().__init__(**kwargs)
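The `embedding_dim=128` here sizes the multi-vector embeddings that ColPali scores with ColBERT-style late interaction (MaxSim): each query-token vector takes its maximum similarity over all document vectors, and the maxima are summed. A minimal sketch of that scoring with random stand-in embeddings (the real model L2-normalizes its embeddings first, making the similarities cosine):

```python
import torch

embedding_dim = 128
query = torch.randn(7, embedding_dim)  # 7 query-token vectors (illustrative)
doc = torch.randn(900, embedding_dim)  # 900 document-patch vectors (illustrative)

# Late interaction / MaxSim: per query token, best-matching document vector.
sim = query @ doc.T                 # (7, 900) token-level similarities
score = sim.max(dim=1).values.sum() # sum of per-token maxima = retrieval score
print(score)
```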
class ColPaliConfig(PretrainedConfig): ''' Configuration class to store the configuration of a [`ColPaliForRetrieval`]. It is used to instantiate a `ColPaliForRetrieval` model according to the specified arguments, defining the model architecture following the methodology from the "ColPali: Efficient Document Retrieval with Vision Language Models" paper. Creating a configuration with the default settings will result in a configuration where the VLM backbone is set to the default PaliGemma configuration, i.e. the one from [vidore/colpali-v1.2](https://huggingface.co/vidore/colpali-v1.2). Note that, contrary to what the class name suggests (the name actually refers to the ColPali **methodology**), you can use a different VLM backbone model than PaliGemma by passing the corresponding VLM configuration to the class constructor. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vlm_config (`PretrainedConfig`, *optional*): Configuration of the VLM backbone model. text_config (`PretrainedConfig`, *optional*): Configuration of the text backbone model. Overrides the `text_config` attribute of the `vlm_config` if provided. embedding_dim (`int`, *optional*, defaults to 128): Dimension of the multi-vector embeddings produced by the model. Example: ```python from transformers.models.colpali import ColPaliConfig, ColPaliForRetrieval config = ColPaliConfig() model = ColPaliForRetrieval(config) ``` ''' def __init__(self, vlm_config=None, text_config=None, embedding_dim: int=128, **kwargs): pass
2
1
39
3
36
0
9
0.64
1
7
0
0
1
3
1
1
77
13
39
13
31
25
21
7
19
9
1
2
9
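A short sketch of how the validation branches in `ColPaliConfig.__init__` behave in practice (assumes a transformers version that ships ColPali; only default configs are built, no weights are downloaded):

```python
from transformers.models.colpali import ColPaliConfig

# Default: the VLM backbone falls back to a default PaliGemma config.
config = ColPaliConfig(embedding_dim=64)
print(config.vlm_config.model_type)    # 'paligemma'
print(config.text_config.hidden_size)  # width consumed by the embedding projection
print(config.embedding_dim)            # 64

# A dict backbone config must carry a `model_type` key, otherwise the
# validation branch above raises KeyError.
try:
    ColPaliConfig(vlm_config={})
except KeyError as e:
    print(e)
```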
1,327
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/colpali/modeling_colpali.py
transformers.models.colpali.modeling_colpali.ColPaliForRetrieval
from typing import Optional from .configuration_colpali import ColPaliConfig from transformers import AutoModelForImageTextToText from ...utils import ModelOutput, auto_docstring, can_return_tuple from torch import nn import torch @auto_docstring(custom_intro='\n The ColPali architecture leverages VLMs to construct efficient multi-vector embeddings directly\n from document images (“screenshots”) for document retrieval. The model is trained to maximize the similarity\n between these document embeddings and the corresponding query embeddings, using the late interaction method\n introduced in ColBERT.\n\n Using ColPali removes the need for potentially complex and brittle layout recognition and OCR pipelines with a\n single model that can take into account both the textual and visual content (layout, charts, etc.) of a document.\n\n ColPali is part of the ColVision model family, which was first introduced in the following paper:\n [*ColPali: Efficient Document Retrieval with Vision Language Models*](https://huggingface.co/papers/2407.01449).\n ') class ColPaliForRetrieval(ColPaliPreTrainedModel): _checkpoint_conversion_mapping = {'vlm.language_model.model': 'vlm.model.language_model', 'vlm.vision_tower': 'vlm.model.vision_tower', 'vlm.multi_modal_projector': 'vlm.model.multi_modal_projector', 'vlm.language_model.lm_head': 'vlm.lm_head'} def __init__(self, config: ColPaliConfig): super().__init__(config) self.config = config self.vocab_size = config.vlm_config.text_config.vocab_size self.vlm = AutoModelForImageTextToText.from_config(config.vlm_config) self._tied_weights_keys = [f'vlm.language_model.{k}' for k in self.vlm._tied_weights_keys or []] self.embedding_dim = self.config.embedding_dim self.embedding_proj_layer = nn.Linear(self.config.vlm_config.text_config.hidden_size, self.embedding_dim) self.post_init() @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> ColPaliForRetrievalOutput: if pixel_values is not None: pixel_values = pixel_values.to(dtype=self.dtype) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict vlm_output = self.vlm.model(input_ids=input_ids, attention_mask=attention_mask, pixel_values=pixel_values, output_hidden_states=True, return_dict=True, output_attentions=output_attentions, **kwargs) vlm_hidden_states = vlm_output.hidden_states if output_hidden_states else None vlm_image_hidden_states = vlm_output.image_hidden_states if pixel_values is not None else None last_hidden_states = vlm_output[0] proj_dtype = self.embedding_proj_layer.weight.dtype embeddings = self.embedding_proj_layer(last_hidden_states.to(proj_dtype)) embeddings = embeddings / embeddings.norm(dim=-1, keepdim=True) if attention_mask is not None: embeddings = embeddings * attention_mask.unsqueeze(-1) return ColPaliForRetrievalOutput(embeddings=embeddings, past_key_values=vlm_output.past_key_values, hidden_states=vlm_hidden_states, attentions=vlm_output.attentions, image_hidden_states=vlm_image_hidden_states) def get_input_embeddings(self): return self.vlm.get_input_embeddings() def 
set_input_embeddings(self, value): self.vlm.set_input_embeddings(value) def get_output_embeddings(self): return self.vlm.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.vlm.set_output_embeddings(new_embeddings) def tie_weights(self): return self.vlm.tie_weights() def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: model_embeds = self.vlm.resize_token_embeddings(new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of, mean_resizing=mean_resizing) self.config.vlm_config.text_config.vocab_size = model_embeds.num_embeddings self.config.vlm_config.vocab_size = model_embeds.num_embeddings self.vlm.vocab_size = model_embeds.num_embeddings self.vocab_size = model_embeds.num_embeddings return model_embeds
@auto_docstring(custom_intro='\n The ColPali architecture leverages VLMs to construct efficient multi-vector embeddings directly\n from document images (“screenshots”) for document retrieval. The model is trained to maximize the similarity\n between these document embeddings and the corresponding query embeddings, using the late interaction method\n introduced in ColBERT.\n\n Using ColPali removes the need for potentially complex and brittle layout recognition and OCR pipelines with a\n single model that can take into account both the textual and visual content (layout, charts, etc.) of a document.\n\n ColPali is part of the ColVision model family, which was first introduced in the following paper:\n [*ColPali: Efficient Document Retrieval with Vision Language Models*](https://huggingface.co/papers/2407.01449).\n ') class ColPaliForRetrieval(ColPaliPreTrainedModel): def __init__(self, config: ColPaliConfig): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> ColPaliForRetrievalOutput: pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def get_output_embeddings(self): pass def set_output_embeddings(self, new_embeddings): pass def tie_weights(self): pass def resize_token_embeddings(self, new_num_tokens: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, mean_resizing: bool=True) -> nn.Embedding: pass
12
0
10
1
9
1
2
0.09
1
7
2
0
10
6
10
11
116
21
91
39
64
8
51
24
40
11
2
1
21
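A hedged end-to-end sketch of the retrieval flow this class implements: embed document pages and queries separately, then score them with late interaction. The checkpoint name is illustrative (a transformers-format ColPali checkpoint is assumed), and `score_retrieval` comes from the processor records later in this section:

```python
import torch
from PIL import Image
from transformers import ColPaliForRetrieval, ColPaliProcessor

ckpt = "vidore/colpali-v1.2-hf"  # assumption: a transformers-native ColPali checkpoint
model = ColPaliForRetrieval.from_pretrained(ckpt, torch_dtype=torch.bfloat16)
processor = ColPaliProcessor.from_pretrained(ckpt)

images = [Image.new("RGB", (448, 448), "white")]  # stand-in for a document page
queries = ["What is the total revenue?"]

with torch.no_grad():
    image_embeddings = model(**processor(images=images)).embeddings  # (1, seq_img, 128)
    query_embeddings = model(**processor(text=queries)).embeddings   # (1, seq_q, 128)

scores = processor.score_retrieval(query_embeddings, image_embeddings)  # (n_queries, n_passages)
print(scores.shape)
```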
1,328
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/colpali/modeling_colpali.py
transformers.models.colpali.modeling_colpali.ColPaliForRetrievalOutput
from ...cache_utils import Cache from dataclasses import dataclass from typing import Optional from ...utils import ModelOutput, auto_docstring, can_return_tuple import torch @dataclass @auto_docstring(custom_intro='\n Base class for ColPali embeddings output.\n ') class ColPaliForRetrievalOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): The embeddings of the model. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder after projecting last hidden state. """ loss: Optional[torch.FloatTensor] = None embeddings: Optional[torch.Tensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass @auto_docstring(custom_intro='\n Base class for ColPali embeddings output.\n ') class ColPaliForRetrievalOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): The embeddings of the model. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder after projecting last hidden state. ''' pass
3
1
0
0
0
0
0
3.57
1
0
0
0
0
0
0
0
37
5
7
7
6
25
7
7
6
0
1
0
0
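Since `ColPaliForRetrievalOutput` is a `ModelOutput` dataclass, it supports both attribute and tuple-style access, and `None` fields are skipped when converting to a tuple. A small sketch:

```python
import torch
from transformers.models.colpali.modeling_colpali import ColPaliForRetrievalOutput

out = ColPaliForRetrievalOutput(embeddings=torch.randn(2, 16, 128))
print(out.embeddings.shape)  # attribute access
print(out[0].shape)          # index 0 is `embeddings`: `loss` is None and skipped
print(len(out.to_tuple()))   # 1 -- only non-None fields survive
```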
1,329
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/colpali/modeling_colpali.py
transformers.models.colpali.modeling_colpali.ColPaliPreTrainedModel
from ...utils import ModelOutput, auto_docstring, can_return_tuple from .configuration_colpali import ColPaliConfig from ...modeling_utils import PreTrainedModel from torch import nn @auto_docstring class ColPaliPreTrainedModel(PreTrainedModel): config: ColPaliConfig base_model_prefix = 'model' _no_split_modules = [] _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True def _init_weights(self, module): std = self.config.initializer_range if hasattr(self.config, 'initializer_range') else self.config.vlm_config.text_config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_()
@auto_docstring class ColPaliPreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass
3
0
15
1
14
0
6
0
1
0
0
1
1
0
1
1
20
2
18
6
16
0
13
6
11
6
1
2
6
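The `_init_weights` hook above applies a simple scheme: Linear/Conv2d weights drawn from N(0, std) with zeroed biases, and the embedding row at `padding_idx` zeroed. The same logic as a standalone function:

```python
import torch
from torch import nn

std = 0.02  # stand-in for config.initializer_range

def init_weights(module: nn.Module) -> None:
    if isinstance(module, (nn.Linear, nn.Conv2d)):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()

model = nn.Sequential(nn.Embedding(10, 8, padding_idx=0), nn.Linear(8, 4))
model.apply(init_weights)
print(model[0].weight[0].abs().sum().item())  # 0.0 -- the padding row stays zero
```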
1,330
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/colpali/modular_colpali.py
transformers.models.colpali.modular_colpali.ColPaliProcessor
from transformers.models.paligemma.processing_paligemma import IMAGE_TOKEN, PaliGemmaProcessor, build_string_from_input from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessingKwargs, Unpack from typing import Optional, Union from ...image_utils import ImageInput, make_flat_list_of_images import torch class ColPaliProcessor(PaliGemmaProcessor): """ Constructs a ColPali processor which wraps a PaliGemmaProcessor and adds special methods to process images and queries, as well as to compute the late-interaction retrieval score. [`ColPaliProcessor`] offers all the functionalities of [`PaliGemmaProcessor`]. See the [`~PaliGemmaProcessor.__call__`] for more information. Args: image_processor ([`SiglipImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. visual_prompt_prefix (`str`, *optional*, defaults to `"Describe the image."`): A string that gets tokenized and prepended to the image tokens. query_prefix (`str`, *optional*, defaults to `"Question: "`): A prefix to be used for the query. """ def __init__(self, image_processor=None, tokenizer=None, chat_template=None, visual_prompt_prefix: str='Describe the image.', query_prefix: str='Question: '): super().__init__(image_processor=image_processor, tokenizer=tokenizer, chat_template=chat_template) self.visual_prompt_prefix = visual_prompt_prefix self.query_prefix = query_prefix @property def query_augmentation_token(self) -> str: """ Return the query augmentation token. Query augmentation buffers are used as reasoning buffers during inference. """ return self.tokenizer.pad_token def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[ColPaliProcessorKwargs]) -> BatchFeature: """ Main method to prepare for the model either (1) one or several texts or (2) one or several image(s). This method is a custom wrapper around the PaliGemmaProcessor's [`~PaliGemmaProcessor.__call__`] method adapted for the ColPali model. It cannot process both text and images at the same time. When preparing the text(s), this method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`]. When preparing the image(s), this method forwards the `images` and `kwargs` arguments to SiglipImageProcessor's [`~SiglipImageProcessor.__call__`]. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). 
return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ output_kwargs = self._merge_kwargs(ColPaliProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) suffix = output_kwargs['text_kwargs'].pop('suffix', None) return_token_type_ids = suffix is not None if text is None and images is None: raise ValueError('Either text or images must be provided') if text is not None and images is not None: raise ValueError('Only one of text or images can be processed at a time') if images is not None: images = self.image_processor.fetch_images(images) images = make_flat_list_of_images(images) texts_doc = [self.visual_prompt_prefix] * len(images) images = [image.convert('RGB') for image in images] input_strings = [build_string_from_input(prompt=prompt, bos_token=self.tokenizer.bos_token, image_seq_len=self.image_seq_length, image_token=IMAGE_TOKEN, num_images=len(image_list) if isinstance(image_list, list) else 1) for prompt, image_list in zip(texts_doc, images)] pixel_values = self.image_processor(images, **output_kwargs['images_kwargs'])['pixel_values'] if output_kwargs['text_kwargs'].get('max_length', None) is not None: output_kwargs['text_kwargs']['max_length'] += self.image_seq_length inputs = self.tokenizer(input_strings, return_token_type_ids=False, **output_kwargs['text_kwargs']) return_data = {**inputs, 'pixel_values': pixel_values} if return_token_type_ids: labels = inputs['input_ids'].masked_fill(inputs['token_type_ids'] == 0, -100) return_data.update({'labels': labels}) return BatchFeature(data=return_data) elif text is not None: if isinstance(text, str): text = [text] elif not (isinstance(text, list) and isinstance(text[0], str)): raise ValueError('Text must be a string or a list of strings') if suffix is None: suffix = self.query_augmentation_token * 10 texts_query: list[str] = [] for query in text: query = self.tokenizer.bos_token + self.query_prefix + query + suffix + '\n' texts_query.append(query) output_kwargs['text_kwargs']['max_length'] = output_kwargs['text_kwargs'].get('max_length', 50) batch_query = self.tokenizer(texts_query, return_token_type_ids=False, **output_kwargs['text_kwargs']) return batch_query def process_images(self, images: Optional[ImageInput]=None, **kwargs: Unpack[ColPaliProcessorKwargs]) -> BatchFeature: """ Prepare for the model one or several image(s). This method is a wrapper around the `__call__` method of the ColPaliProcessor's [`ColPaliProcessor.__call__`]. This method forwards the `images` and `kwargs` arguments to the image processor. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. 
return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ return self.__call__(images=images, **kwargs) def process_queries(self, text: Union[TextInput, list[TextInput]], **kwargs: Unpack[ColPaliProcessorKwargs]) -> BatchFeature: """ Prepare for the model one or several texts. This method is a wrapper around the `__call__` method of the ColPaliProcessor's [`ColPaliProcessor.__call__`]. This method forwards the `text` and `kwargs` arguments to the tokenizer. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). """ return self.__call__(text=text, **kwargs) def score_retrieval(self, query_embeddings: Union['torch.Tensor', list['torch.Tensor']], passage_embeddings: Union['torch.Tensor', list['torch.Tensor']], batch_size: int=128, output_dtype: Optional['torch.dtype']=None, output_device: Union['torch.device', str]='cpu') -> 'torch.Tensor': """ Compute the late-interaction/MaxSim score (ColBERT-like) for the given multi-vector query embeddings (`qs`) and passage embeddings (`ps`). For ColPali, a passage is the image of a document page. Because the embedding tensors are multi-vector and can thus have different shapes, they should be fed as: (1) a list of tensors, where the i-th tensor is of shape (sequence_length_i, embedding_dim) (2) a single tensor of shape (n_passages, max_sequence_length, embedding_dim) -> usually obtained by padding the list of tensors. Args: query_embeddings (`Union[torch.Tensor, list[torch.Tensor]]`): Query embeddings. passage_embeddings (`Union[torch.Tensor, list[torch.Tensor]]`): Passage embeddings. batch_size (`int`, *optional*, defaults to 128): Batch size for computing scores. output_dtype (`torch.dtype`, *optional*): The dtype of the output tensor. If `None`, the dtype of the input embeddings is used. output_device (`torch.device` or `str`, *optional*, defaults to "cpu"): The device of the output tensor. Returns: `torch.Tensor`: A tensor of shape `(n_queries, n_passages)` containing the scores. The score tensor is placed on the `output_device` ("cpu" by default). 
""" if len(query_embeddings) == 0: raise ValueError('No queries provided') if len(passage_embeddings) == 0: raise ValueError('No passages provided') if query_embeddings[0].device != passage_embeddings[0].device: raise ValueError('Queries and passages must be on the same device') if query_embeddings[0].dtype != passage_embeddings[0].dtype: raise ValueError('Queries and passages must have the same dtype') if output_dtype is None: output_dtype = query_embeddings[0].dtype scores: list[torch.Tensor] = [] for i in range(0, len(query_embeddings), batch_size): batch_scores: list[torch.Tensor] = [] batch_queries = torch.nn.utils.rnn.pad_sequence(query_embeddings[i:i + batch_size], batch_first=True, padding_value=0) for j in range(0, len(passage_embeddings), batch_size): batch_passages = torch.nn.utils.rnn.pad_sequence(passage_embeddings[j:j + batch_size], batch_first=True, padding_value=0) batch_scores.append(torch.einsum('bnd,csd->bcns', batch_queries, batch_passages).max(dim=3)[0].sum(dim=2)) scores.append(torch.cat(batch_scores, dim=1).to(output_dtype).to(output_device)) return torch.cat(scores, dim=0)
class ColPaliProcessor(PaliGemmaProcessor): ''' Constructs a ColPali processor which wraps a PaliGemmaProcessor and adds special methods to process images and queries, as well as to compute the late-interaction retrieval score. [`ColPaliProcessor`] offers all the functionalities of [`PaliGemmaProcessor`]. See the [`~PaliGemmaProcessor.__call__`] for more information. Args: image_processor ([`SiglipImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. visual_prompt_prefix (`str`, *optional*, defaults to `"Describe the image."`): A string that gets tokenized and prepended to the image tokens. query_prefix (`str`, *optional*, defaults to `"Question: "`): A prefix to be used for the query. ''' def __init__(self, image_processor=None, tokenizer=None, chat_template=None, visual_prompt_prefix: str='Describe the image.', query_prefix: str='Question: '): pass @property def query_augmentation_token(self) -> str: ''' Return the query augmentation token. Query augmentation buffers are used as reasoning buffers during inference. ''' pass def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[ColPaliProcessorKwargs]) -> BatchFeature: ''' Main method to prepare for the model either (1) one or several texts or (2) one or several image(s). This method is a custom wrapper around the PaliGemmaProcessor's [`~PaliGemmaProcessor.__call__`] method adapted for the ColPali model. It cannot process both text and images at the same time. When preparing the text(s), this method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`]. When preparing the image(s), this method forwards the `images` and `kwargs` arguments to SiglipImageProcessor's [`~SiglipImageProcessor.__call__`]. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. 
Returned when `images` is not `None`. ''' pass def process_images(self, images: Optional[ImageInput]=None, **kwargs: Unpack[ColPaliProcessorKwargs]) -> BatchFeature: ''' Prepare for the model one or several image(s). This method is a wrapper around the `__call__` method of the ColPaliProcessor's [`ColPaliProcessor.__call__`]. This method forwards the `images` and `kwargs` arguments to the image processor. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. ''' pass def process_queries(self, text: Union[TextInput, list[TextInput]], **kwargs: Unpack[ColPaliProcessorKwargs]) -> BatchFeature: ''' Prepare for the model one or several texts. This method is a wrapper around the `__call__` method of the ColPaliProcessor's [`ColPaliProcessor.__call__`]. This method forwards the `text` and `kwargs` arguments to the tokenizer. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). ''' pass def score_retrieval(self, query_embeddings: Union['torch.Tensor', list['torch.Tensor']], passage_embeddings: Union['torch.Tensor', list['torch.Tensor']], batch_size: int=128, output_dtype: Optional['torch.dtype']=None, output_device: Union['torch.device', str]='cpu') -> 'torch.Tensor': ''' Compute the late-interaction/MaxSim score (ColBERT-like) for the given multi-vector query embeddings (`qs`) and passage embeddings (`ps`). For ColPali, a passage is the image of a document page. 
Because the embedding tensors are multi-vector and can thus have different shapes, they should be fed as: (1) a list of tensors, where the i-th tensor is of shape (sequence_length_i, embedding_dim) (2) a single tensor of shape (n_passages, max_sequence_length, embedding_dim) -> usually obtained by padding the list of tensors. Args: query_embeddings (`Union[torch.Tensor, list[torch.Tensor]]`): Query embeddings. passage_embeddings (`Union[torch.Tensor, list[torch.Tensor]]`): Passage embeddings. batch_size (`int`, *optional*, defaults to 128): Batch size for computing scores. output_dtype (`torch.dtype`, *optional*): The dtype of the output tensor. If `None`, the dtype of the input embeddings is used. output_device (`torch.device` or `str`, *optional*, defaults to "cpu"): The device of the output tensor. Returns: `torch.Tensor`: A tensor of shape `(n_queries, n_passages)` containing the scores. The score tensor is placed on the `output_device` ("cpu" by default). ''' pass
8
6
53
8
24
21
5
0.95
1
9
2
0
5
1
5
27
288
50
123
50
94
117
69
26
63
16
3
2
27
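The core of `score_retrieval` above is a single einsum implementing late interaction (MaxSim): for each query token, take the maximum similarity over all passage tokens, then sum over query tokens. A toy reproduction on random unit-normalized embeddings:

```python
import torch
import torch.nn.functional as F

queries = F.normalize(torch.randn(2, 5, 8), dim=-1)   # (n_queries, query_len, dim)
passages = F.normalize(torch.randn(3, 7, 8), dim=-1)  # (n_passages, passage_len, dim)

# sim[b, c, n, s] = similarity of query-token n (query b) with passage-token s (passage c)
sim = torch.einsum("bnd,csd->bcns", queries, passages)
scores = sim.max(dim=3)[0].sum(dim=2)  # MaxSim over passage tokens, summed over query tokens
print(scores.shape)  # torch.Size([2, 3])
```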
1,331
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/colpali/modular_colpali.py
transformers.models.colpali.modular_colpali.ColPaliProcessorKwargs
from ...processing_utils import ProcessingKwargs, Unpack class ColPaliProcessorKwargs(ProcessingKwargs, total=False): _defaults = {'text_kwargs': {'padding': 'longest'}, 'images_kwargs': {'data_format': 'channels_first', 'do_convert_rgb': True}, 'common_kwargs': {'return_tensors': 'pt'}}
class ColPaliProcessorKwargs(ProcessingKwargs, total=False): pass
1
0
0
0
0
0
0
0
2
0
0
0
0
0
0
0
11
0
11
2
10
0
2
2
1
0
3
0
0
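The `_defaults` above are merged with call-site kwargs by `ProcessorMixin._merge_kwargs`, with the caller taking precedence. Conceptually (a deliberate simplification of the real merge logic):

```python
defaults = {"padding": "longest"}  # from _defaults['text_kwargs']
caller_kwargs = {"padding": "max_length", "max_length": 64}
merged = {**defaults, **caller_kwargs}  # caller overrides defaults
print(merged)  # {'padding': 'max_length', 'max_length': 64}
```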
1,332
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/colpali/processing_colpali.py
transformers.models.colpali.processing_colpali.ColPaliProcessor
from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput, make_flat_list_of_images from ...tokenization_utils_base import AddedToken, PreTokenizedInput, TextInput from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack from typing import Optional, Union import torch class ColPaliProcessor(ProcessorMixin): """ Constructs a ColPali processor which wraps a PaliGemmaProcessor and adds special methods to process images and queries, as well as to compute the late-interaction retrieval score. [`ColPaliProcessor`] offers all the functionalities of [`PaliGemmaProcessor`]. See the [`~PaliGemmaProcessor.__call__`] for more information. Args: image_processor ([`SiglipImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. visual_prompt_prefix (`str`, *optional*, defaults to `"Describe the image."`): A string that gets tokenized and prepended to the image tokens. query_prefix (`str`, *optional*, defaults to `"Question: "`): A prefix to be used for the query. """ attributes = ['image_processor', 'tokenizer'] image_processor_class = ('SiglipImageProcessor', 'SiglipImageProcessorFast') tokenizer_class = ('GemmaTokenizer', 'GemmaTokenizerFast') def __init__(self, image_processor=None, tokenizer=None, chat_template=None, visual_prompt_prefix: str='Describe the image.', query_prefix: str='Question: '): super().__init__(image_processor, tokenizer, chat_template=chat_template) if not hasattr(image_processor, 'image_seq_length'): raise ValueError('Image processor is missing an `image_seq_length` attribute.') self.image_seq_length = image_processor.image_seq_length if not hasattr(tokenizer, 'image_token'): image_token = AddedToken(IMAGE_TOKEN, normalized=False, special=True) tokens_to_add = {'additional_special_tokens': [image_token]} tokenizer.add_special_tokens(tokens_to_add) self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) self.image_token = IMAGE_TOKEN else: self.image_token_id = tokenizer.image_token_id self.image_token = tokenizer.image_token tokenizer.add_tokens(EXTRA_TOKENS) tokenizer.add_bos_token = False tokenizer.add_eos_token = False self.visual_prompt_prefix = visual_prompt_prefix self.query_prefix = query_prefix def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[ColPaliProcessorKwargs]) -> BatchFeature: """ Main method to prepare for the model either (1) one or several texts or (2) one or several image(s). This method is a custom wrapper around the PaliGemmaProcessor's [`~PaliGemmaProcessor.__call__`] method adapted for the ColPali model. It cannot process both text and images at the same time. When preparing the text(s), this method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`]. When preparing the image(s), this method forwards the `images` and `kwargs` arguments to SiglipImageProcessor's [`~SiglipImageProcessor.__call__`]. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. 
Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ output_kwargs = self._merge_kwargs(ColPaliProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) suffix = output_kwargs['text_kwargs'].pop('suffix', None) return_token_type_ids = suffix is not None if text is None and images is None: raise ValueError('Either text or images must be provided') if text is not None and images is not None: raise ValueError('Only one of text or images can be processed at a time') if images is not None: images = self.image_processor.fetch_images(images) images = make_flat_list_of_images(images) texts_doc = [self.visual_prompt_prefix] * len(images) images = [image.convert('RGB') for image in images] input_strings = [build_string_from_input(prompt=prompt, bos_token=self.tokenizer.bos_token, image_seq_len=self.image_seq_length, image_token=IMAGE_TOKEN, num_images=len(image_list) if isinstance(image_list, list) else 1) for prompt, image_list in zip(texts_doc, images)] pixel_values = self.image_processor(images, **output_kwargs['images_kwargs'])['pixel_values'] if output_kwargs['text_kwargs'].get('max_length', None) is not None: output_kwargs['text_kwargs']['max_length'] += self.image_seq_length inputs = self.tokenizer(input_strings, return_token_type_ids=False, **output_kwargs['text_kwargs']) return_data = {**inputs, 'pixel_values': pixel_values} if return_token_type_ids: labels = inputs['input_ids'].masked_fill(inputs['token_type_ids'] == 0, -100) return_data.update({'labels': labels}) return BatchFeature(data=return_data) elif text is not None: if isinstance(text, str): text = [text] elif not (isinstance(text, list) and isinstance(text[0], str)): raise ValueError('Text must be a string or a list of strings') if suffix is None: suffix = self.query_augmentation_token * 10 texts_query: list[str] = [] for query in text: query = self.tokenizer.bos_token + self.query_prefix + query + suffix + '\n' texts_query.append(query) output_kwargs['text_kwargs']['max_length'] = output_kwargs['text_kwargs'].get('max_length', 50) batch_query = self.tokenizer(texts_query, return_token_type_ids=False, **output_kwargs['text_kwargs']) return batch_query def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. 
Args: image_sizes (list[list[str]], *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. """ vision_data = {} if image_sizes is not None: num_image_tokens = [self.image_seq_length] * len(image_sizes) num_image_patches = [1] * len(image_sizes) vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches}) return MultiModalData(**vision_data) @property def query_augmentation_token(self) -> str: """ Return the query augmentation token. Query augmentation buffers are used as reasoning buffers during inference. """ return self.tokenizer.pad_token def process_images(self, images: Optional[ImageInput]=None, **kwargs: Unpack[ColPaliProcessorKwargs]) -> BatchFeature: """ Prepare for the model one or several image(s). This method is a wrapper around the `__call__` method of the ColPaliProcessor's [`ColPaliProcessor.__call__`]. This method forwards the `images` and `kwargs` arguments to the image processor. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ return self.__call__(images=images, **kwargs) def process_queries(self, text: Union[TextInput, list[TextInput]], **kwargs: Unpack[ColPaliProcessorKwargs]) -> BatchFeature: """ Prepare for the model one or several texts. This method is a wrapper around the `__call__` method of the ColPaliProcessor's [`ColPaliProcessor.__call__`]. This method forwards the `text` and `kwargs` arguments to the tokenizer. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). 
""" return self.__call__(text=text, **kwargs) def score_retrieval(self, query_embeddings: Union['torch.Tensor', list['torch.Tensor']], passage_embeddings: Union['torch.Tensor', list['torch.Tensor']], batch_size: int=128, output_dtype: Optional['torch.dtype']=None, output_device: Union['torch.device', str]='cpu') -> 'torch.Tensor': """ Compute the late-interaction/MaxSim score (ColBERT-like) for the given multi-vector query embeddings (`qs`) and passage embeddings (`ps`). For ColPali, a passage is the image of a document page. Because the embedding tensors are multi-vector and can thus have different shapes, they should be fed as: (1) a list of tensors, where the i-th tensor is of shape (sequence_length_i, embedding_dim) (2) a single tensor of shape (n_passages, max_sequence_length, embedding_dim) -> usually obtained by padding the list of tensors. Args: query_embeddings (`Union[torch.Tensor, list[torch.Tensor]`): Query embeddings. passage_embeddings (`Union[torch.Tensor, list[torch.Tensor]`): Passage embeddings. batch_size (`int`, *optional*, defaults to 128): Batch size for computing scores. output_dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): The dtype of the output tensor. If `None`, the dtype of the input embeddings is used. output_device (`torch.device` or `str`, *optional*, defaults to "cpu"): The device of the output tensor. Returns: `torch.Tensor`: A tensor of shape `(n_queries, n_passages)` containing the scores. The score tensor is saved on the "cpu" device. """ if len(query_embeddings) == 0: raise ValueError('No queries provided') if len(passage_embeddings) == 0: raise ValueError('No passages provided') if query_embeddings[0].device != passage_embeddings[0].device: raise ValueError('Queries and passages must be on the same device') if query_embeddings[0].dtype != passage_embeddings[0].dtype: raise ValueError('Queries and passages must have the same dtype') if output_dtype is None: output_dtype = query_embeddings[0].dtype scores: list[torch.Tensor] = [] for i in range(0, len(query_embeddings), batch_size): batch_scores: list[torch.Tensor] = [] batch_queries = torch.nn.utils.rnn.pad_sequence(query_embeddings[i:i + batch_size], batch_first=True, padding_value=0) for j in range(0, len(passage_embeddings), batch_size): batch_passages = torch.nn.utils.rnn.pad_sequence(passage_embeddings[j:j + batch_size], batch_first=True, padding_value=0) batch_scores.append(torch.einsum('bnd,csd->bcns', batch_queries, batch_passages).max(dim=3)[0].sum(dim=2)) scores.append(torch.cat(batch_scores, dim=1).to(output_dtype).to(output_device)) return torch.cat(scores, dim=0)
class ColPaliProcessor(ProcessorMixin): ''' Constructs a ColPali processor which wraps a PaliGemmaProcessor and adds special methods to process images and queries, as well as to compute the late-interaction retrieval score. [`ColPaliProcessor`] offers all the functionalities of [`PaliGemmaProcessor`]. See the [`~PaliGemmaProcessor.__call__`] for more information. Args: image_processor ([`SiglipImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. visual_prompt_prefix (`str`, *optional*, defaults to `"Describe the image."`): A string that gets tokenized and prepended to the image tokens. query_prefix (`str`, *optional*, defaults to `"Question: "`): A prefix to be used for the query. ''' def __init__(self, image_processor=None, tokenizer=None, chat_template=None, visual_prompt_prefix: str='Describe the image.', query_prefix: str='Question: '): pass def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[ColPaliProcessorKwargs]) -> BatchFeature: ''' Main method to prepare for the model either (1) one or several texts or (2) one or several image(s). This method is a custom wrapper around the PaliGemmaProcessor's [`~PaliGemmaProcessor.__call__`] method adapted for the ColPali model. It cannot process both text and images at the same time. When preparing the text(s), this method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`]. When preparing the image(s), this method forwards the `images` and `kwargs` arguments to SiglipImageProcessor's [`~SiglipImageProcessor.__call__`]. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. ''' pass def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): ''' Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. 
Args: image_sizes (list[list[str]], *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. ''' pass @property def query_augmentation_token(self) -> str: ''' Return the query augmentation token. Query augmentation buffers are used as reasoning buffers during inference. ''' pass def process_images(self, images: Optional[ImageInput]=None, **kwargs: Unpack[ColPaliProcessorKwargs]) -> BatchFeature: ''' Prepare for the model one or several image(s). This method is a wrapper around the `__call__` method of the ColPaliProcessor's [`ColPaliProcessor.__call__`]. This method forwards the `images` and `kwargs` arguments to the image processor. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. ''' pass def process_queries(self, text: Union[TextInput, list[TextInput]], **kwargs: Unpack[ColPaliProcessorKwargs]) -> BatchFeature: ''' Prepare for the model one or several texts. This method is a wrapper around the `__call__` method of the ColPaliProcessor's [`ColPaliProcessor.__call__`]. This method forwards the `text` and `kwargs` arguments to the tokenizer. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). 
''' pass def score_retrieval(self, query_embeddings: Union['torch.Tensor', list['torch.Tensor']], passage_embeddings: Union['torch.Tensor', list['torch.Tensor']], batch_size: int=128, output_dtype: Optional['torch.dtype']=None, output_device: Union['torch.device', str]='cpu') -> 'torch.Tensor': ''' Compute the late-interaction/MaxSim score (ColBERT-like) for the given multi-vector query embeddings (`qs`) and passage embeddings (`ps`). For ColPali, a passage is the image of a document page. Because the embedding tensors are multi-vector and can thus have different shapes, they should be fed as: (1) a list of tensors, where the i-th tensor is of shape (sequence_length_i, embedding_dim) (2) a single tensor of shape (n_passages, max_sequence_length, embedding_dim) -> usually obtained by padding the list of tensors. Args: query_embeddings (`Union[torch.Tensor, list[torch.Tensor]]`): Query embeddings. passage_embeddings (`Union[torch.Tensor, list[torch.Tensor]]`): Passage embeddings. batch_size (`int`, *optional*, defaults to 128): Batch size for computing scores. output_dtype (`torch.dtype`, *optional*): The dtype of the output tensor. If `None`, the dtype of the input embeddings is used. output_device (`torch.device` or `str`, *optional*, defaults to "cpu"): The device of the output tensor. Returns: `torch.Tensor`: A tensor of shape `(n_queries, n_passages)` containing the scores. The score tensor is placed on the `output_device` ("cpu" by default). ''' pass
9
7
34
5
17
12
4
0.78
1
11
2
0
9
2
9
26
343
59
161
70
121
125
99
40
89
16
2
2
35
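The query-side preprocessing in `__call__` above builds each query string as bos token + `"Question: "` + query + ten pad tokens + newline, where the pad tokens serve as query-augmentation buffers. A sketch with stand-in token strings:

```python
bos_token = "<bos>"  # stand-in for tokenizer.bos_token
pad_token = "<pad>"  # `query_augmentation_token` is the tokenizer's pad token

def build_query(query: str, query_prefix: str = "Question: ", num_buffer_tokens: int = 10) -> str:
    suffix = pad_token * num_buffer_tokens  # mirrors `suffix = self.query_augmentation_token * 10`
    return bos_token + query_prefix + query + suffix + "\n"

print(build_query("What is shown in the figure?"))
```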
1,333
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/colpali/processing_colpali.py
transformers.models.colpali.processing_colpali.ColPaliProcessorKwargs
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack class ColPaliProcessorKwargs(ProcessingKwargs, total=False): _defaults = {'text_kwargs': {'padding': 'longest'}, 'images_kwargs': {'data_format': 'channels_first', 'do_convert_rgb': True}, 'common_kwargs': {'return_tensors': 'pt'}}
class ColPaliProcessorKwargs(ProcessingKwargs, total=False): pass
1
0
0
0
0
0
0
0
2
0
0
0
0
0
0
0
11
0
11
2
10
0
2
2
1
0
3
0
0
1,334
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/configuration_conditional_detr.py
transformers.models.conditional_detr.configuration_conditional_detr.ConditionalDetrConfig
from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class ConditionalDetrConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`ConditionalDetrModel`]. It is used to instantiate a Conditional DETR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Conditional DETR [microsoft/conditional-detr-resnet-50](https://huggingface.co/microsoft/conditional-detr-resnet-50) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: use_timm_backbone (`bool`, *optional*, defaults to `True`): Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] API. backbone_config (`PretrainedConfig` or `dict`, *optional*): The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_queries (`int`, *optional*, defaults to 300): Number of object queries, i.e. detection slots. This is the maximal number of objects [`ConditionalDetrModel`] can detect in a single image. For COCO, we recommend 300 queries. d_model (`int`, *optional*, defaults to 256): This parameter is a general dimension parameter, defining dimensions for components such as the encoder layer and projection parameters in the decoder layer, among others. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in encoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. init_xavier_std (`float`, *optional*, defaults to 1): The scaling factor used for the Xavier initialization gain in the HM Attention map module. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. 
decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. backbone (`str`, *optional*, defaults to `"resnet50"`): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `True`): Whether to use pretrained weights for the backbone. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`. class_cost (`float`, *optional*, defaults to 1): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. mask_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the Focal loss in the panoptic segmentation loss. dice_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.1): Relative classification weight of the 'no-object' class in the object detection loss. focal_alpha (`float`, *optional*, defaults to 0.25): Alpha parameter in the focal loss. 
Examples: ```python >>> from transformers import ConditionalDetrConfig, ConditionalDetrModel >>> # Initializing a Conditional DETR microsoft/conditional-detr-resnet-50 style configuration >>> configuration = ConditionalDetrConfig() >>> # Initializing a model (with random weights) from the microsoft/conditional-detr-resnet-50 style configuration >>> model = ConditionalDetrModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'conditional_detr' keys_to_ignore_at_inference = ['past_key_values'] attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads'} def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type='sine', backbone='resnet50', use_pretrained_backbone=True, backbone_kwargs=None, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs): if use_timm_backbone and backbone_kwargs is None: backbone_kwargs = {} if dilation: backbone_kwargs['output_stride'] = 16 backbone_kwargs['out_indices'] = [1, 2, 3, 4] backbone_kwargs['in_chans'] = num_channels elif not use_timm_backbone and backbone in (None, 'resnet50'): if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.') backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4']) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get('model_type') config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs) self.use_timm_backbone = use_timm_backbone self.backbone_config = backbone_config self.num_channels = num_channels self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.init_xavier_std = init_xavier_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.num_hidden_layers = encoder_layers self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.backbone_kwargs = backbone_kwargs self.dilation = dilation self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost self.mask_loss_coefficient = mask_loss_coefficient self.dice_loss_coefficient = dice_loss_coefficient self.cls_loss_coefficient = 
cls_loss_coefficient self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.focal_alpha = focal_alpha super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model @property def sub_configs(self): return {'backbone_config': type(self.backbone_config)} if getattr(self, 'backbone_config', None) is not None else {}
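A minimal usage sketch for the configuration recorded above (assumes the `transformers` library is installed; the printed values follow directly from the `__init__` defaults shown in the record):

```python
# Minimal sketch: exercises the timm-backbone default path in __init__ above,
# where `dilation=True` is translated into timm backbone kwargs, and shows the
# `hidden_size` -> `d_model` aliasing declared in attribute_map.
from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig(dilation=True)
print(config.num_queries)      # 300, the signature default
print(config.backbone_kwargs)  # {'output_stride': 16, 'out_indices': [1, 2, 3, 4], 'in_chans': 3}
print(config.hidden_size)      # 256, resolved through attribute_map / the hidden_size property
```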
class ConditionalDetrConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`ConditionalDetrModel`]. It is used to instantiate a Conditional DETR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Conditional DETR [microsoft/conditional-detr-resnet-50](https://huggingface.co/microsoft/conditional-detr-resnet-50) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: use_timm_backbone (`bool`, *optional*, defaults to `True`): Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] API. backbone_config (`PretrainedConfig` or `dict`, *optional*): The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_queries (`int`, *optional*, defaults to 300): Number of object queries, i.e. detection slots. This is the maximal number of objects [`ConditionalDetrModel`] can detect in a single image. For COCO, we recommend 300 queries. d_model (`int`, *optional*, defaults to 256): This parameter is a general dimension parameter, defining dimensions for components such as the encoder layer and projection parameters in the decoder layer, among others. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in the decoder. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in the encoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. init_xavier_std (`float`, *optional*, defaults to 1): The scaling factor used for the Xavier initialization gain in the HM Attention map module. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. 
See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. backbone (`str`, *optional*, defaults to `"resnet50"`): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `True`): Whether to use pretrained weights for the backbone. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`. class_cost (`float`, *optional*, defaults to 2): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. mask_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the Focal loss in the panoptic segmentation loss. dice_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. cls_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the classification loss in the object detection loss. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. focal_alpha (`float`, *optional*, defaults to 0.25): Alpha parameter in the focal loss. 
Examples: ```python >>> from transformers import ConditionalDetrConfig, ConditionalDetrModel >>> # Initializing a Conditional DETR microsoft/conditional-detr-resnet-50 style configuration >>> configuration = ConditionalDetrConfig() >>> # Initializing a model (with random weights) from the microsoft/conditional-detr-resnet-50 style configuration >>> model = ConditionalDetrModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type='sine', backbone='resnet50', use_pretrained_backbone=True, backbone_kwargs=None, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs): pass @property def num_attention_heads(self) -> int: pass @property def hidden_size(self) -> int: pass @property def sub_configs(self): pass
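The `class_cost`, `bbox_cost` and `giou_cost` arguments documented above weight the Hungarian matching cost. The sketch below is illustrative only: `ce`, `l1` and `giou` are hypothetical per-pair cost matrices standing in for the classification, L1-box and generalized-IoU terms, not the library's internal matcher:

```python
# Illustrative sketch of how the three *_cost weights combine into the matching
# cost that the Hungarian matcher minimizes over (query, target) pairs.
import numpy as np
from scipy.optimize import linear_sum_assignment

def hungarian_match(ce, l1, giou, class_cost=2.0, bbox_cost=5.0, giou_cost=2.0):
    cost = class_cost * ce + bbox_cost * l1 + giou_cost * giou  # (num_queries, num_targets)
    return linear_sum_assignment(cost)  # optimal one-to-one query/target assignment

rng = np.random.default_rng(0)
rows, cols = hungarian_match(rng.random((300, 4)), rng.random((300, 4)), rng.random((300, 4)))
print(list(zip(rows, cols)))  # four matched (query_index, target_index) pairs
```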
8
1
36
1
33
2
3
0.93
1
3
0
0
3
35
3
3
223
13
109
84
65
101
58
44
54
6
1
2
8
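Reading the metric rows above against the column schema, the recorded `CommentToCodeRatio` is consistent with the recorded line counts. A quick check, assuming the ratio is defined as `CountLineComment / CountLineCode`:

```python
# Sanity check on the row above, assuming
# CommentToCodeRatio = CountLineComment / CountLineCode.
count_line_comment, count_line_code = 101, 109
print(round(count_line_comment / count_line_code, 2))  # 0.93, matching the recorded value
```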
1335
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/configuration_conditional_detr.py
transformers.models.conditional_detr.configuration_conditional_detr.ConditionalDetrOnnxConfig
from ...onnx import OnnxConfig from collections import OrderedDict from collections.abc import Mapping from packaging import version class ConditionalDetrOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse('1.11') @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'})]) @property def atol_for_validation(self) -> float: return 1e-05 @property def default_onnx_opset(self) -> int: return 12
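A hedged export sketch built only from the three properties defined above; the dummy input shapes and output handling are illustrative assumptions, and a real export of a DETR-family model may need extra tracing workarounds:

```python
# Sketch: feed the OnnxConfig's `inputs` mapping to torch.onnx.export as the
# input names and dynamic axes, and use the declared minimum opset.
import torch
from transformers import ConditionalDetrForObjectDetection

model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")
onnx_config = ConditionalDetrOnnxConfig(model.config)

dummy = {
    "pixel_values": torch.randn(2, 3, 800, 1066),
    "pixel_mask": torch.ones(2, 800, 1066, dtype=torch.long),
}
torch.onnx.export(
    model,
    (dummy,),                                      # dict as last element -> keyword args
    "conditional_detr.onnx",
    input_names=list(onnx_config.inputs),          # ['pixel_values', 'pixel_mask']
    dynamic_axes=dict(onnx_config.inputs),         # the axis mapping from `inputs` above
    opset_version=onnx_config.default_onnx_opset,  # 12
)
```

The `atol_for_validation` value (1e-05) would then be the tolerance passed to e.g. `numpy.testing.assert_allclose` when comparing PyTorch and ONNX Runtime outputs.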
class ConditionalDetrOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def atol_for_validation(self) -> float: pass @property def default_onnx_opset(self) -> int: pass
7
0
4
0
4
0
1
0
1
4
0
0
3
0
3
3
19
3
16
8
9
0
8
5
4
1
1
0
3
1336
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py
transformers.models.conditional_detr.feature_extraction_conditional_detr.ConditionalDetrFeatureExtractor
from ...utils.import_utils import requires import warnings from .image_processing_conditional_detr import ConditionalDetrImageProcessor @requires(backends=('vision',)) class ConditionalDetrFeatureExtractor(ConditionalDetrImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn('The class ConditionalDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use ConditionalDetrImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs)
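A minimal check of the deprecation shim above (assumes the vision extra is installed): construction emits the `FutureWarning`, and the instance behaves as a `ConditionalDetrImageProcessor` in every other respect:

```python
# Verifies the shim's two properties: it warns on construction and is
# otherwise interchangeable with the image processor it subclasses.
import warnings
from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = ConditionalDetrFeatureExtractor()

assert any(issubclass(w.category, FutureWarning) for w in caught)
assert isinstance(extractor, ConditionalDetrImageProcessor)
```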
@requires(backends=('vision',)) class ConditionalDetrFeatureExtractor(ConditionalDetrImageProcessor): def __init__(self, *args, **kwargs) -> None: pass
3
0
7
0
7
0
1
0
1
2
0
0
1
0
1
37
8
0
8
2
6
0
4
2
2
1
4
0
1
1337
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/image_processing_conditional_detr.py
transformers.models.conditional_detr.image_processing_conditional_detr.ConditionalDetrImageProcessor
from ...image_processing_utils import BaseImageProcessor, get_size_dict from ...feature_extraction_utils import BatchFeature import pathlib from typing import Any, Optional, Union import numpy as np from collections.abc import Iterable from ...utils.import_utils import requires from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, AnnotationFormat, AnnotationType, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_annotations, validate_kwargs, validate_preprocess_arguments from ...utils import TensorType, is_scipy_available, is_torch_available, is_torch_tensor, is_vision_available, logging from ...image_transforms import PaddingMode, center_to_corners_format, corners_to_center_format, id_to_rgb, pad, rescale, resize, rgb_to_id, to_channel_dimension_format @requires(backends=('vision',)) class ConditionalDetrImageProcessor(BaseImageProcessor): """ Constructs a Conditional Detr image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. 
do_convert_annotations (`bool`, *optional*, defaults to `True`): Controls whether to convert the annotations to the format expected by the DETR model. Converts the bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`. Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method. If `True`, padding will be applied to the bottom and right of the image with zeros. If `pad_size` is provided, the image will be padded to the specified dimensions. Otherwise, the image will be padded to the maximum height and width of the batch. pad_size (`dict[str, int]`, *optional*): The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height and width in the batch. """ model_input_names = ['pixel_values', 'pixel_mask'] def __init__(self, format: Union[str, AnnotationFormat]=AnnotationFormat.COCO_DETECTION, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_annotations: Optional[bool]=None, do_pad: bool=True, pad_size: Optional[dict[str, int]]=None, **kwargs) -> None: if 'pad_and_return_pixel_mask' in kwargs: do_pad = kwargs.pop('pad_and_return_pixel_mask') if 'max_size' in kwargs: logger.warning_once("The `max_size` parameter is deprecated and will be removed in v4.26. Please specify in `size['longest_edge'] instead`.") max_size = kwargs.pop('max_size') else: max_size = None if size is None else 1333 size = size if size is not None else {'shortest_edge': 800, 'longest_edge': 1333} size = get_size_dict(size, max_size=max_size, default_to_square=False) if do_convert_annotations is None: do_convert_annotations = do_normalize super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.do_convert_annotations = do_convert_annotations self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad self.pad_size = pad_size self._valid_processor_keys = ['images', 'annotations', 'return_segmentation_masks', 'masks_path', 'do_resize', 'size', 'resample', 'do_rescale', 'rescale_factor', 'do_normalize', 'do_convert_annotations', 'image_mean', 'image_std', 'do_pad', 'pad_size', 'format', 'return_tensors', 'data_format', 'input_data_format'] @classmethod def from_dict(cls, image_processor_dict: dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. 
`ConditionalDetrImageProcessor.from_pretrained(checkpoint, size=600, max_size=800)` """ image_processor_dict = image_processor_dict.copy() if 'max_size' in kwargs: image_processor_dict['max_size'] = kwargs.pop('max_size') if 'pad_and_return_pixel_mask' in kwargs: image_processor_dict['pad_and_return_pixel_mask'] = kwargs.pop('pad_and_return_pixel_mask') return super().from_dict(image_processor_dict, **kwargs) def prepare_annotation(self, image: np.ndarray, target: dict, format: Optional[AnnotationFormat]=None, return_segmentation_masks: Optional[bool]=None, masks_path: Optional[Union[str, pathlib.Path]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> dict: """ Prepare an annotation for feeding into ConditionalDetr model. """ format = format if format is not None else self.format if format == AnnotationFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation(image, target, return_segmentation_masks, input_data_format=input_data_format) elif format == AnnotationFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation(image, target, masks_path=masks_path, return_masks=return_segmentation_masks, input_data_format=input_data_format) else: raise ValueError(f'Format {format} is not supported.') return target def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the image's `(height, width)` dimensions after resizing. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if 'max_size' in kwargs: logger.warning_once("The `max_size` parameter is deprecated and will be removed in v4.26. 
Please specify in `size['longest_edge'] instead`.") max_size = kwargs.pop('max_size') else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if 'shortest_edge' in size and 'longest_edge' in size: new_size = get_resize_output_image_size(image, size['shortest_edge'], size['longest_edge'], input_data_format=input_data_format) elif 'max_height' in size and 'max_width' in size: new_size = get_image_size_for_max_height_width(image, size['max_height'], size['max_width'], input_data_format=input_data_format) elif 'height' in size and 'width' in size: new_size = (size['height'], size['width']) else: raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.") image = resize(image, size=new_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) return image def resize_annotation(self, annotation, orig_size, size, resample: PILImageResampling=PILImageResampling.NEAREST) -> dict: """ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched to this number. """ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: """ Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) def normalize_annotation(self, annotation: dict, image_size: tuple[int, int]) -> dict: """ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to `[center_x, center_y, width, height]` format and from absolute to relative pixel values. """ return normalize_annotation(annotation, image_size=image_size) def _update_annotation_for_padded_image(self, annotation: dict, input_image_size: tuple[int, int], output_image_size: tuple[int, int], padding, update_bboxes) -> dict: """ Update the annotation for a padded image. 
""" new_annotation = {} new_annotation['size'] = output_image_size for key, value in annotation.items(): if key == 'masks': masks = value masks = pad(masks, padding, mode=PaddingMode.CONSTANT, constant_values=0, input_data_format=ChannelDimension.FIRST) masks = safe_squeeze(masks, 1) new_annotation['masks'] = masks elif key == 'boxes' and update_bboxes: boxes = value boxes *= np.asarray([input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0], input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0]]) new_annotation['boxes'] = boxes elif key == 'size': new_annotation['size'] = output_image_size else: new_annotation[key] = value return new_annotation def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], annotation: Optional[dict[str, Any]]=None, constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) if annotation is not None: annotation = self._update_annotation_for_padded_image(annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes) return (padded_image, annotation) def pad(self, images: list[np.ndarray], annotations: Optional[Union[AnnotationType, list[AnnotationType]]]=None, constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True, pad_size: Optional[dict[str, int]]=None) -> BatchFeature: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: images (list[`np.ndarray`]): Images to pad. annotations (`AnnotationType` or `list[AnnotationType]`, *optional*): Annotations to transform according to the padding that is applied to the images. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. update_bboxes (`bool`, *optional*, defaults to `True`): Whether to update the bounding boxes in the annotations to match the padded images. 
If the bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)` format, the bounding boxes will not be updated. pad_size (`dict[str, int]`, *optional*): The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height and width in the batch. """ pad_size = pad_size if pad_size is not None else self.pad_size if pad_size is not None: padded_size = (pad_size['height'], pad_size['width']) else: padded_size = get_max_height_width(images, input_data_format=input_data_format) annotation_list = annotations if annotations is not None else [None] * len(images) padded_images = [] padded_annotations = [] for image, annotation in zip(images, annotation_list): padded_image, padded_annotation = self._pad_image(image, padded_size, annotation, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, update_bboxes=update_bboxes) padded_images.append(padded_image) padded_annotations.append(padded_annotation) data = {'pixel_values': padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images] data['pixel_mask'] = masks encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations] return encoded_inputs def preprocess(self, images: ImageInput, annotations: Optional[Union[AnnotationType, list[AnnotationType]]]=None, return_segmentation_masks: Optional[bool]=None, masks_path: Optional[Union[str, pathlib.Path]]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, do_convert_annotations: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, format: Optional[Union[str, AnnotationFormat]]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None, **kwargs) -> BatchFeature: """ Preprocess an image or a batch of images so that it can be used by the model. Args: images (`ImageInput`): Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. annotations (`AnnotationType` or `list[AnnotationType]`, *optional*): List of annotations associated with the image or batch of images. If annotation is for object detection, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "annotations" (`list[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`list[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. 
- "file_name" (`str`): The file name of the image. return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): Whether to return segmentation masks. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. do_resize (`bool`, *optional*, defaults to self.do_resize): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to self.size): Size of the image's `(height, width)` dimensions after resizing. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. resample (`PILImageResampling`, *optional*, defaults to self.resample): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to self.do_rescale): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to self.rescale_factor): Rescale factor to use when rescaling the image. do_normalize (`bool`, *optional*, defaults to self.do_normalize): Whether to normalize the image. do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations): Whether to convert the annotations to the format expected by the model. Converts the bounding boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)` and in relative coordinates. image_mean (`float` or `list[float]`, *optional*, defaults to self.image_mean): Mean to use when normalizing the image. image_std (`float` or `list[float]`, *optional*, defaults to self.image_std): Standard deviation to use when normalizing the image. do_pad (`bool`, *optional*, defaults to self.do_pad): Whether to pad the image. If `True`, padding will be applied to the bottom and right of the image with zeros. If `pad_size` is provided, the image will be padded to the specified dimensions. Otherwise, the image will be padded to the maximum height and width of the batch. format (`str` or `AnnotationFormat`, *optional*, defaults to self.format): Format of the annotations. return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): Type of tensors to return. If `None`, will return the list of images. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. pad_size (`dict[str, int]`, *optional*): The size `{"height": int, "width" int}` to pad the images to. Must be larger than any image size provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height and width in the batch. """ if 'pad_and_return_pixel_mask' in kwargs: logger.warning_once('The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, use `do_pad` instead.') do_pad = kwargs.pop('pad_and_return_pixel_mask') if 'max_size' in kwargs: logger.warning_once("The `max_size` argument is deprecated and will be removed in a future version, use `size['longest_edge']` instead.") size = kwargs.pop('max_size') do_resize = self.do_resize if do_resize is None else do_resize size = self.size if size is None else size size = get_size_dict(size=size, default_to_square=False) resample = self.resample if resample is None else resample do_rescale = self.do_rescale if do_rescale is None else do_rescale rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = self.do_normalize if do_normalize is None else do_normalize image_mean = self.image_mean if image_mean is None else image_mean image_std = self.image_std if image_std is None else image_std do_convert_annotations = self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations do_pad = self.do_pad if do_pad is None else do_pad pad_size = self.pad_size if pad_size is None else pad_size format = self.format if format is None else format images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor.') validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample) if annotations is not None and isinstance(annotations, dict): annotations = [annotations] if annotations is not None and len(images) != len(annotations): raise ValueError(f'The number of images ({len(images)}) and annotations ({len(annotations)}) do not match.') format = AnnotationFormat(format) if annotations is not None: validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations) if masks_path is not None and format == AnnotationFormat.COCO_PANOPTIC and (not isinstance(masks_path, (pathlib.Path, str))): raise ValueError(f'The path to the directory containing the mask PNG files should be provided as a `pathlib.Path` or string object, but is {type(masks_path)} instead.') images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if annotations is not None: prepared_images = [] prepared_annotations = [] for image, target in zip(images, annotations): target = self.prepare_annotation(image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path, input_data_format=input_data_format) prepared_images.append(image) prepared_annotations.append(target) images = prepared_images annotations = prepared_annotations del prepared_images, prepared_annotations if do_resize: if annotations is not None: resized_images, resized_annotations = ([], []) for image, target in zip(images, annotations): orig_size = get_image_size(image, input_data_format) resized_image = self.resize(image, size=size, resample=resample, input_data_format=input_data_format) resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image, input_data_format)) resized_images.append(resized_image) resized_annotations.append(resized_annotation) images = resized_images annotations = resized_annotations del resized_images, resized_annotations else: images = [self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images] if do_convert_annotations and annotations is not None: annotations = [self.normalize_annotation(annotation, get_image_size(image, input_data_format)) for annotation, image in zip(annotations, images)] if do_pad: encoded_inputs = self.pad(images, annotations=annotations, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format, update_bboxes=do_convert_annotations, return_tensors=return_tensors, pad_size=pad_size) else: images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] encoded_inputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors) if annotations is not None: encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations] return encoded_inputs def post_process(self, outputs, target_sizes): """ Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the Pascal VOC format (xmin, ymin, xmax, ymax). Args: outputs ([`ConditionalDetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" logging.warning_once('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.') out_logits, out_bbox = (outputs.logits, outputs.pred_boxes) if len(out_logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') if target_sizes.shape[1] != 2: raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch') prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor') labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, list[tuple]]=None, top_k: int=100): """ Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. top_k (`int`, *optional*, defaults to 100): Keep only top k bounding boxes before filtering by thresholding. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" out_logits, out_bbox = (outputs.logits, outputs.pred_boxes) if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') prob = out_logits.sigmoid() prob = prob.view(out_logits.shape[0], -1) k_value = min(top_k, prob.size(1)) topk_values, topk_indexes = torch.topk(prob, k_value, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode='floor') labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) if target_sizes is not None: if isinstance(target_sizes, list): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({'scores': score, 'labels': label, 'boxes': box}) return results def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple[int, int]]]=None): """ Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. target_sizes (`list[tuple[int, int]]`, *optional*): A list of tuples (`tuple[int, int]`) containing the target size (height, width) of each image in the batch. If unset, predictions will not be resized. Returns: `list[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. """ class_queries_logits = outputs.logits masks_queries_logits = outputs.pred_masks masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') semantic_segmentation = [] for idx in range(batch_size): resized_logits = nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, target_sizes: Optional[list[tuple[int, int]]]=None, return_coco_annotation: Optional[bool]=False) -> list[dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. 
mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`list[Tuple]`, *optional*): List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If unset, predictions will not be resized. return_coco_annotation (`bool`, *optional*): Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. Returns: `list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ class_queries_logits = outputs.logits masks_queries_logits = outputs.pred_masks batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) results: list[dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels) if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({'segmentation': segmentation, 'segments_info': []}) continue target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments(mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=[], target_size=target_size) if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({'segmentation': segmentation, 'segments_info': segments}) return results def post_process_panoptic_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[set[int]]=None, target_sizes: Optional[list[tuple[int, int]]]=None) -> list[dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): The outputs from [`ConditionalDetrForSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. 
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`list[Tuple]`, *optional*): List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If unset, predictions will not be resized. Returns: `list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning_once('`label_ids_to_fuse` unset. No instance will be fused.') label_ids_to_fuse = set() class_queries_logits = outputs.logits masks_queries_logits = outputs.pred_masks batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) results: list[dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels) if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({'segmentation': segmentation, 'segments_info': []}) continue target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments(mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size) results.append({'segmentation': segmentation, 'segments_info': segments}) return results
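An end-to-end sketch tying the `preprocess` and `post_process_object_detection` methods above together (assumes `torch`, `PIL` and network access; `example.jpg` is a hypothetical local file):

```python
# Preprocess an image, run the detector, and convert the raw outputs to
# absolute (xmin, ymin, xmax, ymax) boxes via post_process_object_detection.
import torch
from PIL import Image
from transformers import ConditionalDetrForObjectDetection, ConditionalDetrImageProcessor

image = Image.open("example.jpg")
processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")

inputs = processor(images=image, return_tensors="pt")  # resize, rescale, normalize, pad
with torch.no_grad():
    outputs = model(**inputs)

target_sizes = torch.tensor([image.size[::-1]])  # PIL gives (w, h); the API wants (h, w)
detections = processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]
for score, label, box in zip(detections["scores"], detections["labels"], detections["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())
```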
@requires(backends=('vision',)) class ConditionalDetrImageProcessor(BaseImageProcessor): ''' Constructs a Conditional Detr image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_annotations (`bool`, *optional*, defaults to `True`): Controls whether to convert the annotations to the format expected by the DETR model. Converts the bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`. Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method. If `True`, padding will be applied to the bottom and right of the image with zeros. If `pad_size` is provided, the image will be padded to the specified dimensions. Otherwise, the image will be padded to the maximum height and width of the batch. pad_size (`dict[str, int]`, *optional*): The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size provided for preprocessing.
If `pad_size` is not provided, images will be padded to the largest height and width in the batch. ''' def __init__(self, format: Union[str, AnnotationFormat]=AnnotationFormat.COCO_DETECTION, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_annotations: Optional[bool]=None, do_pad: bool=True, pad_size: Optional[dict[str, int]]=None, **kwargs) -> None: pass @classmethod def from_dict(cls, image_processor_dict: dict[str, Any], **kwargs): ''' Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. `ConditionalDetrImageProcessor.from_pretrained(checkpoint, size=600, max_size=800)` ''' pass def prepare_annotation(self, image: np.ndarray, target: dict, format: Optional[AnnotationFormat]=None, return_segmentation_masks: Optional[bool]=None, masks_path: Optional[Union[str, pathlib.Path]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> dict: ''' Prepare an annotation for feeding into ConditionalDetr model. ''' pass def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the image's `(height, width)` dimensions after resizing. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. ''' pass def resize_annotation(self, annotation, orig_size, size, resample: PILImageResampling=PILImageResampling.NEAREST) -> dict: ''' Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched to this number. ''' pass def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: ''' Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. 
rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. ''' pass def normalize_annotation(self, annotation: dict, image_size: tuple[int, int]) -> dict: ''' Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to `[center_x, center_y, width, height]` format and from absolute to relative pixel values. ''' pass def _update_annotation_for_padded_image(self, annotation: dict, input_image_size: tuple[int, int], output_image_size: tuple[int, int], padding, update_bboxes) -> dict: ''' Update the annotation for a padded image. ''' pass def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], annotation: Optional[dict[str, Any]]=None, constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True) -> np.ndarray: ''' Pad an image with zeros to the given size. ''' pass def pad(self, images: list[np.ndarray], annotations: Optional[Union[AnnotationType, list[AnnotationType]]]=None, constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True, pad_size: Optional[dict[str, int]]=None) -> BatchFeature: ''' Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: images (list[`np.ndarray`]): Images to pad. annotations (`AnnotationType` or `list[AnnotationType]`, *optional*): Annotations to transform according to the padding that is applied to the images. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. update_bboxes (`bool`, *optional*, defaults to `True`): Whether to update the bounding boxes in the annotations to match the padded images. 
If the bounding boxes have not been converted to relative coordinates and `(center_x, center_y, width, height)` format, the bounding boxes will not be updated. pad_size (`dict[str, int]`, *optional*): The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height and width in the batch. ''' pass def preprocess(self, images: ImageInput, annotations: Optional[Union[AnnotationType, list[AnnotationType]]]=None, return_segmentation_masks: Optional[bool]=None, masks_path: Optional[Union[str, pathlib.Path]]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, do_convert_annotations: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, format: Optional[Union[str, AnnotationFormat]]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[dict[str, int]]=None, **kwargs) -> BatchFeature: ''' Preprocess an image or a batch of images so that it can be used by the model. Args: images (`ImageInput`): Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. annotations (`AnnotationType` or `list[AnnotationType]`, *optional*): List of annotations associated with the image or batch of images. If annotation is for object detection, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "annotations" (`list[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`list[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. - "file_name" (`str`): The file name of the image. return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): Whether to return segmentation masks. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. do_resize (`bool`, *optional*, defaults to self.do_resize): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to self.size): Size of the image's `(height, width)` dimensions after resizing. Available options are: - `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`. Do NOT keep the aspect ratio. - `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge less or equal to `longest_edge`. - `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to `max_width`.
resample (`PILImageResampling`, *optional*, defaults to self.resample): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to self.do_rescale): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to self.rescale_factor): Rescale factor to use when rescaling the image. do_normalize (`bool`, *optional*, defaults to self.do_normalize): Whether to normalize the image. do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations): Whether to convert the annotations to the format expected by the model. Converts the bounding boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)` and in relative coordinates. image_mean (`float` or `list[float]`, *optional*, defaults to self.image_mean): Mean to use when normalizing the image. image_std (`float` or `list[float]`, *optional*, defaults to self.image_std): Standard deviation to use when normalizing the image. do_pad (`bool`, *optional*, defaults to self.do_pad): Whether to pad the image. If `True`, padding will be applied to the bottom and right of the image with zeros. If `pad_size` is provided, the image will be padded to the specified dimensions. Otherwise, the image will be padded to the maximum height and width of the batch. format (`str` or `AnnotationFormat`, *optional*, defaults to self.format): Format of the annotations. return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): Type of tensors to return. If `None`, will return the list of images. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. pad_size (`dict[str, int]`, *optional*): The size `{"height": int, "width": int}` to pad the images to. Must be larger than any image size provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest height and width in the batch. ''' pass def post_process(self, outputs, target_sizes): ''' Converts the output of [`ConditionalDetrForObjectDetection`] into the Pascal VOC format (xmin, ymin, xmax, ymax). Args: outputs ([`ConditionalDetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augmentation, but before padding. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model.
''' pass def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, list[tuple]]=None, top_k: int=100): ''' Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`ConditionalDetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. top_k (`int`, *optional*, defaults to 100): Keep only top k bounding boxes before filtering by thresholding. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. ''' pass def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple[int, int]]]=None): ''' Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. target_sizes (`list[tuple[int, int]]`, *optional*): A list of tuples (`tuple[int, int]`) containing the target size (height, width) of each image in the batch. If unset, predictions will not be resized. Returns: `list[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` corresponds to a semantic class id. ''' pass def post_process_instance_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, target_sizes: Optional[list[tuple[int, int]]]=None, return_coco_annotation: Optional[bool]=False) -> list[dict]: ''' Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`list[Tuple]`, *optional*): List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested final size (height, width) of each prediction. If unset, predictions will not be resized. return_coco_annotation (`bool`, *optional*): Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. Returns: `list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask is found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. ''' pass def post_process_panoptic_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[set[int]]=None, target_sizes: Optional[list[tuple[int, int]]]=None) -> list[dict]: ''' Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): The outputs from [`ConditionalDetrForSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this set will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`list[Tuple]`, *optional*): List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested final size (height, width) of each prediction in batch. If unset, predictions will not be resized. Returns: `list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. ''' pass
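A hedged usage sketch of the preprocessing path described above, assuming `transformers` and `Pillow` are installed; the blank image and the COCO-style annotation are made up for illustration, not taken from a real dataset:

import numpy as np
from PIL import Image
from transformers import ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor(format="coco_detection")
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
# COCO detection format: boxes are (top_left_x, top_left_y, width, height) in absolute pixels.
annotation = {
    "image_id": 0,
    "annotations": [{"bbox": [10.0, 20.0, 100.0, 50.0], "category_id": 1, "area": 5000.0, "iscrowd": 0}],
}

inputs = processor(images=image, annotations=annotation, return_tensors="pt")
# pixel_values is resized and padded; "labels" holds the converted boxes in
# normalized (center_x, center_y, width, height) format.
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 800, 1066])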
19
16
60
5
37
19
6
0.62
1
18
4
1
15
13
16
36
1,050
95
594
235
466
371
291
124
274
32
3
3
96
1,338
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrAttention
import torch from torch import Tensor, nn from typing import Optional, Union class ConditionalDetrAttention(nn.Module): """ Cross-Attention used in Conditional DETR 'Conditional DETR for Fast Training Convergence' paper. The q_proj, k_proj and v_proj projections are defined outside the attention module. This attention allows the dim of q, k to be different from v. """ def __init__(self, embed_dim: int, out_dim: int, num_heads: int, dropout: float=0.0, bias: bool=True): super().__init__() self.embed_dim = embed_dim self.out_dim = out_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).') self.v_head_dim = out_dim // num_heads if self.v_head_dim * num_heads != self.out_dim: raise ValueError(f'out_dim must be divisible by num_heads (got `out_dim`: {self.out_dim} and `num_heads`: {num_heads}).') self.scaling = self.head_dim ** (-0.5) self.out_proj = nn.Linear(out_dim, out_dim, bias=bias) def _qk_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def _v_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.v_head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, key_states: Optional[torch.Tensor]=None, value_states: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" batch_size, target_len, _ = hidden_states.size() query_states = hidden_states * self.scaling key_states = self._qk_shape(key_states, -1, batch_size) value_states = self._v_shape(value_states, -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) v_proj_shape = (batch_size * self.num_heads, -1, self.v_head_dim) query_states = self._qk_shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*v_proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError(f'Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError(f'Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is {attention_mask.size()}') if attention_mask.dtype == torch.bool: attention_mask = torch.zeros_like(attention_mask, dtype=attn_weights.dtype).masked_fill_(attention_mask, -torch.inf) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout,
training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.v_head_dim): raise ValueError(f'`attn_output` should be of size {(batch_size * self.num_heads, target_len, self.v_head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.v_head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, self.out_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped)
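A minimal sketch of driving this module directly with random tensors, assuming the class above is in scope; it mimics the decoder's cross-attention configuration, where q and k live in a `2 * d_model` space while v stays in `d_model`:

import torch

d_model, num_heads = 256, 8
attn = ConditionalDetrAttention(embed_dim=2 * d_model, out_dim=d_model, num_heads=num_heads)

queries = torch.randn(2, 100, 2 * d_model)  # (batch, num_queries, 2 * d_model)
keys = torch.randn(2, 950, 2 * d_model)     # (batch, source_len, 2 * d_model)
values = torch.randn(2, 950, d_model)       # values keep the output dimension

attn_output, attn_weights = attn(hidden_states=queries, key_states=keys, value_states=values, output_attentions=True)
print(attn_output.shape)   # torch.Size([2, 100, 256])
print(attn_weights.shape)  # torch.Size([2, 8, 100, 950])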
class ConditionalDetrAttention(nn.Module): ''' Cross-Attention used in Conditional DETR 'Conditional DETR for Fast Training Convergence' paper. The q_proj, k_proj and v_proj projections are defined outside the attention module. This attention allows the dim of q, k to be different from v. ''' def __init__(self, embed_dim: int, out_dim: int, num_heads: int, dropout: float=0.0, bias: bool=True): pass def _qk_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): pass def _v_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, key_states: Optional[torch.Tensor]=None, value_states: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]: '''Input shape: Batch x Time x Channel''' pass
5
2
26
4
20
2
3
0.16
1
6
0
0
4
8
4
14
115
21
81
36
62
13
52
22
47
6
1
2
11
1,339
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrConvEncoder
import torch from torch import Tensor, nn from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends from ...utils.backbone_utils import load_backbone class ConditionalDetrConvEncoder(nn.Module): """ Convolutional backbone, using either the AutoBackbone API or one from the timm library. nn.BatchNorm2d layers are replaced by ConditionalDetrFrozenBatchNorm2d as defined above. """ def __init__(self, config): super().__init__() self.config = config if config.use_timm_backbone: requires_backends(self, ['timm']) kwargs = getattr(config, 'backbone_kwargs', {}) kwargs = {} if kwargs is None else kwargs.copy() out_indices = kwargs.pop('out_indices', (1, 2, 3, 4)) num_channels = kwargs.pop('in_chans', config.num_channels) if config.dilation: kwargs['output_stride'] = kwargs.get('output_stride', 16) backbone = create_model(config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=out_indices, in_chans=num_channels, **kwargs) else: backbone = load_backbone(config) with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels backbone_model_type = None if config.backbone is not None: backbone_model_type = config.backbone elif config.backbone_config is not None: backbone_model_type = config.backbone_config.model_type else: raise ValueError('Either `backbone` or `backbone_config` should be provided in the config') if 'resnet' in backbone_model_type: for name, parameter in self.model.named_parameters(): if config.use_timm_backbone: if 'layer2' not in name and 'layer3' not in name and ('layer4' not in name): parameter.requires_grad_(False) elif 'stage.1' not in name and 'stage.2' not in name and ('stage.3' not in name): parameter.requires_grad_(False) def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps out = [] for feature_map in features: mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out
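A minimal sketch of the mask-downsampling step in `forward` above: the boolean pixel mask is resized to each feature map's spatial resolution so that padding information stays aligned with the backbone's stride (shapes are illustrative):

import torch
from torch import nn

pixel_mask = torch.ones(2, 800, 1066, dtype=torch.bool)  # True where pixels are real, False where padded
feature_map = torch.randn(2, 2048, 25, 34)               # e.g. a stride-32 backbone stage

# Nearest-neighbor interpolation of the float mask, then back to bool.
mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
print(mask.shape)  # torch.Size([2, 25, 34])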
class ConditionalDetrConvEncoder(nn.Module): ''' Convolutional backbone, using either the AutoBackbone API or one from the timm library. nn.BatchNorm2d layers are replaced by ConditionalDetrFrozenBatchNorm2d as defined above. ''' def __init__(self, config): pass def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): pass
3
1
31
3
25
3
8
0.2
1
4
0
0
2
3
2
12
70
10
50
16
47
10
37
16
34
12
1
4
15
1,340
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrConvModel
from torch import Tensor, nn class ConditionalDetrConvModel(nn.Module): """ This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. """ def __init__(self, conv_encoder, position_embedding): super().__init__() self.conv_encoder = conv_encoder self.position_embedding = position_embedding def forward(self, pixel_values, pixel_mask): out = self.conv_encoder(pixel_values, pixel_mask) pos = [] for feature_map, mask in out: pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) return (out, pos)
class ConditionalDetrConvModel(nn.Module): ''' This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. ''' def __init__(self, conv_encoder, position_embedding): pass def forward(self, pixel_values, pixel_mask): pass
3
1
7
1
5
1
2
0.45
1
1
0
0
2
2
2
12
19
3
11
8
8
5
11
8
8
2
1
1
3
1,341
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrDecoder
import torch from torch import Tensor, nn from .configuration_conditional_detr import ConditionalDetrConfig from ...modeling_attn_mask_utils import _prepare_4d_attention_mask class ConditionalDetrDecoder(ConditionalDetrPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`ConditionalDetrDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some small tweaks for Conditional DETR: - object_queries and query_position_embeddings are added to the forward pass. - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers. Args: config: ConditionalDetrConfig """ def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.layers = nn.ModuleList([ConditionalDetrDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm = nn.LayerNorm(config.d_model) d_model = config.d_model self.gradient_checkpointing = False self.query_scale = MLP(d_model, d_model, d_model, 2) self.ref_point_head = MLP(d_model, d_model, 2, 2) for layer_id in range(config.decoder_layers - 1): self.layers[layer_id + 1].ca_qpos_proj = None self.post_init() def forward(self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None): """ Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): The query embeddings that are passed into the decoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`: - 1 for queries that are **not masked**, - 0 for queries that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each cross-attention layer. query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds input_shape = inputs_embeds.size()[:-1] if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) intermediate = () if self.config.auxiliary_loss else None all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None reference_points_before_sigmoid = self.ref_point_head(query_position_embeddings) reference_points = reference_points_before_sigmoid.sigmoid().transpose(0, 1) obj_center = reference_points[..., :2].transpose(0, 1) query_sine_embed_before_transformation = gen_sine_position_embeddings(obj_center, self.config.d_model) for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue if idx == 0: pos_transformation = 1 else: pos_transformation = self.query_scale(hidden_states) query_sine_embed = query_sine_embed_before_transformation * pos_transformation layer_outputs = decoder_layer(hidden_states, None, object_queries, query_position_embeddings, query_sine_embed, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, is_first=idx == 0) hidden_states = layer_outputs[0] if self.config.auxiliary_loss: hidden_states = self.layernorm(hidden_states) intermediate += (hidden_states,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layernorm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if self.config.auxiliary_loss: intermediate = torch.stack(intermediate) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate, reference_points] if v is not None)) return ConditionalDetrDecoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, intermediate_hidden_states=intermediate, reference_points=reference_points)
class ConditionalDetrDecoder(ConditionalDetrPreTrainedModel): ''' Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`ConditionalDetrDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some small tweaks for Conditional DETR: - object_queries and query_position_embeddings are added to the forward pass. - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers. Args: config: ConditionalDetrConfig ''' def __init__(self, config: ConditionalDetrConfig): pass def forward(self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None): ''' Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): The query embeddings that are passed into the decoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`: - 1 for queries that are **not masked**, - 0 for queries that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each cross-attention layer. query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' pass
3
2
92
11
59
22
12
0.45
1
8
4
0
2
7
2
3
199
28
119
38
105
53
61
27
58
22
2
3
24
1,342
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrDecoderLayer
from typing import Optional, Union from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer import torch from .configuration_conditional_detr import ConditionalDetrConfig from torch import Tensor, nn class ConditionalDetrDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: ConditionalDetrConfig): super().__init__() self.embed_dim = config.d_model d_model = config.d_model self.sa_qcontent_proj = nn.Linear(d_model, d_model) self.sa_qpos_proj = nn.Linear(d_model, d_model) self.sa_kcontent_proj = nn.Linear(d_model, d_model) self.sa_kpos_proj = nn.Linear(d_model, d_model) self.sa_v_proj = nn.Linear(d_model, d_model) self.self_attn = ConditionalDetrAttention(embed_dim=self.embed_dim, out_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.ca_qcontent_proj = nn.Linear(d_model, d_model) self.ca_qpos_proj = nn.Linear(d_model, d_model) self.ca_kcontent_proj = nn.Linear(d_model, d_model) self.ca_kpos_proj = nn.Linear(d_model, d_model) self.ca_v_proj = nn.Linear(d_model, d_model) self.ca_qpos_sine_proj = nn.Linear(d_model, d_model) self.encoder_attn = ConditionalDetrAttention(self.embed_dim * 2, self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) self.nhead = config.decoder_attention_heads def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, query_sine_embed: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, is_first: Optional[bool]=False): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. object_queries (`torch.FloatTensor`, *optional*): Object queries that are added to the queries and keys in the cross-attention layer. query_position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings that are added to the queries and keys in the self-attention layer. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
""" residual = hidden_states q_content = self.sa_qcontent_proj(hidden_states) q_pos = self.sa_qpos_proj(query_position_embeddings) k_content = self.sa_kcontent_proj(hidden_states) k_pos = self.sa_kpos_proj(query_position_embeddings) v = self.sa_v_proj(hidden_states) _, num_queries, n_model = q_content.shape q = q_content + q_pos k = k_content + k_pos hidden_states, self_attn_weights = self.self_attn(hidden_states=q, attention_mask=attention_mask, key_states=k, value_states=v, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) q_content = self.ca_qcontent_proj(hidden_states) k_content = self.ca_kcontent_proj(encoder_hidden_states) v = self.ca_v_proj(encoder_hidden_states) batch_size, num_queries, n_model = q_content.shape _, source_len, _ = k_content.shape k_pos = self.ca_kpos_proj(object_queries) if is_first: q_pos = self.ca_qpos_proj(query_position_embeddings) q = q_content + q_pos k = k_content + k_pos else: q = q_content k = k_content q = q.view(batch_size, num_queries, self.nhead, n_model // self.nhead) query_sine_embed = self.ca_qpos_sine_proj(query_sine_embed) query_sine_embed = query_sine_embed.view(batch_size, num_queries, self.nhead, n_model // self.nhead) q = torch.cat([q, query_sine_embed], dim=3).view(batch_size, num_queries, n_model * 2) k = k.view(batch_size, source_len, self.nhead, n_model // self.nhead) k_pos = k_pos.view(batch_size, source_len, self.nhead, n_model // self.nhead) k = torch.cat([k, k_pos], dim=3).view(batch_size, source_len, n_model * 2) cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=q, attention_mask=encoder_attention_mask, key_states=k, value_states=v, output_attentions=output_attentions) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs
class ConditionalDetrDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: ConditionalDetrConfig): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, object_queries: Optional[torch.Tensor]=None, query_position_embeddings: Optional[torch.Tensor]=None, query_sine_embed: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, is_first: Optional[bool]=False): ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. object_queries (`torch.FloatTensor`, *optional*): Object queries that are added to the queries and keys in the cross-attention layer. query_position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings that are added to the queries and keys in the self-attention layer. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass
3
1
84
11
55
18
3
0.32
1
5
2
0
2
23
2
12
169
23
111
52
97
36
78
41
75
4
1
1
5
1,343
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrDecoderOutput
import torch from dataclasses import dataclass from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput from typing import Optional, Union from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends @dataclass @auto_docstring(custom_intro='\n    Base class for outputs of the Conditional DETR decoder. This class adds one attribute to\n    BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output\n    of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary\n    decoding losses.\n    ') class ConditionalDetrDecoderOutput(BaseModelOutputWithCrossAttentions): """ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. reference_points (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, 2 (anchor points))`): Reference points (reference points of each layer of the decoder). """ intermediate_hidden_states: Optional[torch.FloatTensor] = None reference_points: Optional[tuple[torch.FloatTensor]] = None
@dataclass @auto_docstring(custom_intro='\n    Base class for outputs of the Conditional DETR decoder. This class adds one attribute to\n    BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output\n    of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary\n    decoding losses.\n    ') class ConditionalDetrDecoderOutput(BaseModelOutputWithCrossAttentions): ''' cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. reference_points (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, 2 (anchor points))`): Reference points (reference points of each layer of the decoder). ''' pass
3
1
0
0
0
0
0
8.67
1
0
0
0
0
0
0
0
31
2
3
3
2
26
3
3
2
0
2
0
0
1,344
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrEncoder
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from .configuration_conditional_detr import ConditionalDetrConfig from torch import Tensor, nn import torch class ConditionalDetrEncoder(ConditionalDetrPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`ConditionalDetrEncoderLayer`]. The encoder updates the flattened feature map through multiple self-attention layers. Small tweak for ConditionalDETR: - object_queries are added to the forward pass. Args: config: ConditionalDetrConfig """ def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.layers = nn.ModuleList([ConditionalDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) self.post_init() def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None): """ Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Object queries that are added to the queries in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: to_drop = True if to_drop: layer_outputs = (None, None) else: layer_outputs = encoder_layer(hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
class ConditionalDetrEncoder(ConditionalDetrPreTrainedModel):
    '''
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`ConditionalDetrEncoderLayer`].

    The encoder updates the flattened feature map through multiple self-attention layers.

    Small tweak for ConditionalDETR:

    - object_queries are added to the forward pass.

    Args:
        config: ConditionalDetrConfig
    '''

    def __init__(self, config: ConditionalDetrConfig):
        pass

    def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        '''
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:

                - 1 for pixel features that are real (i.e. **not masked**),
                - 0 for pixel features that are padding (i.e. **masked**).

                [What are attention masks?](../glossary#attention-mask)
            object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Object queries that are added to the queries in each self-attention layer.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        '''
        pass
3
2
48
9
27
14
8
0.67
1
7
3
0
2
3
2
3
112
23
54
21
43
36
36
13
33
15
2
3
16
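The encoder in the record above skips whole layers at random during training (`dropout_probability < self.layerdrop`), a regularizer usually called LayerDrop or stochastic depth. Below is a minimal standalone sketch of that rule, assuming plain `nn.Module` layers; the function name and toy layers are illustrative, not part of the model:

```python
import torch
from torch import nn

def run_layers_with_layerdrop(hidden_states, layers, layerdrop: float, training: bool):
    # During training each layer is skipped with probability `layerdrop`;
    # hidden states simply pass through unchanged for a skipped layer.
    for layer in layers:
        if training and torch.rand([]) < layerdrop:
            continue
        hidden_states = layer(hidden_states)
    return hidden_states

layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])
out = run_layers_with_layerdrop(torch.randn(2, 5, 4), layers, layerdrop=0.5, training=True)
print(out.shape)  # torch.Size([2, 5, 4])
```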
1,345
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrEncoderLayer
from .configuration_conditional_detr import ConditionalDetrConfig
from torch import Tensor, nn
import torch
from typing import Optional, Union
from ...activations import ACT2FN


class ConditionalDetrEncoderLayer(nn.Module):
    def __init__(self, config: ConditionalDetrConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = DetrAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: Optional[torch.Tensor]=None, output_attentions: bool=False):
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
                values.
            object_queries (`torch.FloatTensor`, *optional*):
                Object queries (also called content embeddings), to be added to the hidden states.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # self-attention block with residual connection
        residual = hidden_states
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            object_queries=object_queries,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # feed-forward block with residual connection
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)

        # guard against fp16 overflow during training
        if self.training:
            if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
                clamp_value = torch.finfo(hidden_states.dtype).max - 1000
                hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
class ConditionalDetrEncoderLayer(nn.Module):
    def __init__(self, config: ConditionalDetrConfig):
        pass

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: Optional[torch.Tensor]=None, output_attentions: bool=False):
        '''
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
                values.
            object_queries (`torch.FloatTensor`, *optional*):
                Object queries (also called content embeddings), to be added to the hidden states.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        '''
        pass
3
1
34
4
24
6
3
0.25
1
5
2
0
2
9
2
12
69
9
48
22
39
12
33
16
30
4
1
2
5
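`ConditionalDetrEncoderLayer.forward` ends with a guard that only fires in training: if any activation became `inf`/`nan` (typical in reduced precision), values are clamped just below the dtype maximum. The same check in isolation (a sketch, with a local helper name):

```python
import torch

def clamp_if_overflowing(hidden_states: torch.Tensor) -> torch.Tensor:
    # Clamp to slightly below the dtype's maximum so later ops stay finite.
    if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
        clamp_value = torch.finfo(hidden_states.dtype).max - 1000
        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
    return hidden_states

x = torch.tensor([1.0, float("inf")], dtype=torch.float16)
print(clamp_if_overflowing(x))  # the inf is replaced by ~fp16 max minus a margin
```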
1,346
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrForObjectDetection
import torch from typing import Optional, Union from torch import Tensor, nn from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends from .configuration_conditional_detr import ConditionalDetrConfig @auto_docstring(custom_intro='\n Conditional DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on\n top, for tasks such as COCO detection.\n ') class ConditionalDetrForObjectDetection(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.model = ConditionalDetrModel(config) self.class_labels_classifier = nn.Linear(config.d_model, config.num_labels) self.bbox_predictor = ConditionalDetrMLPPredictionHead(input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3) self.post_init() @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): return [{'logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @auto_docstring def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[list[dict]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], ConditionalDetrObjectDetectionOutput]: """ decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. labels (`list[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. Examples: ```python >>> from transformers import AutoImageProcessor, AutoModelForObjectDetection >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> model = AutoModelForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[ ... 
0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) Detected remote with confidence 0.833 at location [38.31, 72.1, 177.63, 118.45] Detected cat with confidence 0.831 at location [9.2, 51.38, 321.13, 469.0] Detected cat with confidence 0.804 at location [340.3, 16.85, 642.93, 370.95] Detected remote with confidence 0.683 at location [334.48, 73.49, 366.37, 190.01] Detected couch with confidence 0.535 at location [0.52, 1.19, 640.35, 475.1] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.class_labels_classifier(sequence_output) reference = outputs.reference_points if return_dict else outputs[-2] reference_before_sigmoid = inverse_sigmoid(reference).transpose(0, 1) hs = sequence_output tmp = self.bbox_predictor(hs) tmp[..., :2] += reference_before_sigmoid pred_boxes = tmp.sigmoid() loss, loss_dict, auxiliary_outputs = (None, None, None) if labels is not None: outputs_class, outputs_coord = (None, None) if self.config.auxiliary_loss: outputs_coords = [] intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4] outputs_class = self.class_labels_classifier(intermediate) for lvl in range(intermediate.shape[0]): tmp = self.bbox_predictor(intermediate[lvl]) tmp[..., :2] += reference_before_sigmoid outputs_coord = tmp.sigmoid() outputs_coords.append(outputs_coord) outputs_coord = torch.stack(outputs_coords) loss, loss_dict, auxiliary_outputs = self.loss_function(logits, labels, self.device, pred_boxes, self.config, outputs_class, outputs_coord) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs return (loss, loss_dict) + output if loss is not None else output return ConditionalDetrObjectDetectionOutput(loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
null
7
1
48
6
27
15
4
0.54
1
9
4
0
3
3
3
4
151
21
85
35
66
46
40
21
36
10
2
3
12
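`ConditionalDetrForObjectDetection.forward` above predicts box centers as offsets in logit space relative to per-query reference points: `tmp[..., :2] += inverse_sigmoid(reference)` followed by a final `sigmoid`. `inverse_sigmoid` is not defined in this record; the sketch below uses the clamped-logit definition common in the DETR family (an assumption, not copied from the file):

```python
import torch

def inverse_sigmoid(x: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    # Clamped logit: maps [0, 1] back to unbounded space while avoiding log(0).
    x = x.clamp(min=0, max=1)
    return torch.log(x.clamp(min=eps) / (1 - x).clamp(min=eps))

reference = torch.rand(2, 300, 2)              # per-query reference points in [0, 1]
deltas = torch.randn(2, 300, 4)                # bbox_predictor output: (cx, cy, w, h) logits
deltas[..., :2] += inverse_sigmoid(reference)  # center offsets relative to references
pred_boxes = deltas.sigmoid()                  # normalized (cx, cy, w, h) in [0, 1]
print(pred_boxes.shape)                        # torch.Size([2, 300, 4])
```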
1,347
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrForSegmentation
from .configuration_conditional_detr import ConditionalDetrConfig from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends import torch from typing import Optional, Union @auto_docstring(custom_intro='\n Conditional DETR Model (consisting of a backbone and encoder-decoder Transformer) with a segmentation head on top,\n for tasks such as COCO panoptic.\n ') class ConditionalDetrForSegmentation(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.conditional_detr = ConditionalDetrForObjectDetection(config) hidden_size, number_of_heads = (config.d_model, config.encoder_attention_heads) intermediate_channel_sizes = self.conditional_detr.model.backbone.conv_encoder.intermediate_channel_sizes self.mask_head = ConditionalDetrMaskHeadSmallConv(hidden_size + number_of_heads, intermediate_channel_sizes[::-1][-3:], hidden_size) self.bbox_attention = ConditionalDetrMHAttentionMap(hidden_size, hidden_size, number_of_heads, dropout=0.0, std=config.init_xavier_std) self.post_init() @auto_docstring def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[list[dict]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], ConditionalDetrSegmentationOutput]: """ decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. labels (`list[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss, DICE/F-1 loss and Focal loss. List of dicts, each dictionary containing at least the following 3 keys: 'class_labels', 'boxes' and 'masks' (the class labels, bounding boxes and segmentation masks of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)`, the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)` and the masks a `torch.FloatTensor` of shape `(number of bounding boxes in the image, height, width)`. Examples: ```python >>> import io >>> import requests >>> from PIL import Image >>> import torch >>> import numpy >>> from transformers import ( ... AutoImageProcessor, ... ConditionalDetrConfig, ... ConditionalDetrForSegmentation, ... 
) >>> from transformers.image_transforms import rgb_to_id >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> # randomly initialize all weights of the model >>> config = ConditionalDetrConfig() >>> model = ConditionalDetrForSegmentation(config) >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # Use the `post_process_panoptic_segmentation` method of the `image_processor` to retrieve post-processed panoptic segmentation maps >>> # Segmentation results are returned as a list of dictionaries >>> result = image_processor.post_process_panoptic_segmentation(outputs, target_sizes=[(300, 500)]) >>> # A tensor of shape (height, width) where each value denotes a segment id, filled with -1 if no segment is found >>> panoptic_seg = result[0]["segmentation"] >>> # Get prediction score and segment_id to class_id mapping of each segment >>> panoptic_segments_info = result[0]["segments_info"] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones((batch_size, height, width), device=device) features, object_queries_list = self.conditional_detr.model.backbone(pixel_values, pixel_mask=pixel_mask) feature_map, mask = features[-1] batch_size, num_channels, height, width = feature_map.shape projected_feature_map = self.conditional_detr.model.input_projection(feature_map) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1) flattened_mask = mask.flatten(1) if encoder_outputs is None: encoder_outputs = self.conditional_detr.model.encoder(inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) query_position_embeddings = self.conditional_detr.model.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1) queries = torch.zeros_like(query_position_embeddings) decoder_outputs = self.conditional_detr.model.decoder(inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = decoder_outputs[0] logits = self.conditional_detr.class_labels_classifier(sequence_output) pred_boxes = self.conditional_detr.bbox_predictor(sequence_output).sigmoid() memory = encoder_outputs[0].permute(0, 2, 1).view(batch_size, self.config.d_model, height, width) mask = flattened_mask.view(batch_size, height, width) bbox_mask = self.bbox_attention(sequence_output, memory, mask=~mask) seg_masks = self.mask_head(projected_feature_map, bbox_mask, [features[2][0], features[1][0], features[0][0]]) pred_masks = 
seg_masks.view(batch_size, self.conditional_detr.config.num_queries, seg_masks.shape[-2], seg_masks.shape[-1]) loss, loss_dict, auxiliary_outputs = (None, None, None) if labels is not None: outputs_class, outputs_coord = (None, None) if self.config.auxiliary_loss: intermediate = decoder_outputs.intermediate_hidden_states if return_dict else decoder_outputs[-1] outputs_class = self.conditional_detr.class_labels_classifier(intermediate) outputs_coord = self.conditional_detr.bbox_predictor(intermediate).sigmoid() loss, loss_dict, auxiliary_outputs = self.loss_function(logits, labels, self.device, pred_boxes, pred_masks, self.config, outputs_class, outputs_coord) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes, pred_masks) + auxiliary_outputs + decoder_outputs + encoder_outputs else: output = (logits, pred_boxes, pred_masks) + decoder_outputs + encoder_outputs return (loss, loss_dict) + output if loss is not None else output return ConditionalDetrSegmentationOutput(loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, pred_masks=pred_masks, auxiliary_outputs=auxiliary_outputs, last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
null
5
1
100
17
54
29
7
0.51
1
9
6
0
2
3
2
3
203
35
111
43
94
57
50
30
47
13
2
2
14
1,348
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrFrozenBatchNorm2d
import torch
from torch import Tensor, nn


class ConditionalDetrFrozenBatchNorm2d(nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which any other models than
    torchvision.models.resnet[18,34,50,101] produce nans.
    """

    def __init__(self, n):
        super().__init__()
        self.register_buffer('weight', torch.ones(n))
        self.register_buffer('bias', torch.zeros(n))
        self.register_buffer('running_mean', torch.zeros(n))
        self.register_buffer('running_var', torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        num_batches_tracked_key = prefix + 'num_batches_tracked'
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # fold the frozen statistics into a single per-channel affine transform
        weight = self.weight.reshape(1, -1, 1, 1)
        bias = self.bias.reshape(1, -1, 1, 1)
        running_var = self.running_var.reshape(1, -1, 1, 1)
        running_mean = self.running_mean.reshape(1, -1, 1, 1)
        epsilon = 1e-05
        scale = weight * (running_var + epsilon).rsqrt()
        bias = bias - running_mean * scale
        return x * scale + bias
class ConditionalDetrFrozenBatchNorm2d(nn.Module):
    '''
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which any other models than
    torchvision.models.resnet[18,34,50,101] produce nans.
    '''

    def __init__(self, n):
        pass

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        pass

    def forward(self, x):
        pass
4
1
9
0
8
1
1
0.28
1
1
0
0
3
0
3
13
37
5
25
13
19
7
21
11
17
2
1
1
4
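`ConditionalDetrFrozenBatchNorm2d.forward` folds the frozen statistics into one affine map per channel: `scale = weight * rsqrt(running_var + eps)` and `bias' = bias - running_mean * scale`, so the output is `x * scale + bias'`. A quick check that this algebra matches eval-mode `BatchNorm2d` on the same statistics (a standalone sketch; all names are local):

```python
import torch
from torch import nn

n = 8
bn = nn.BatchNorm2d(n, eps=1e-5).eval()
with torch.no_grad():
    bn.running_mean.normal_()
    bn.running_var.uniform_(0.5, 2.0)
    bn.weight.normal_()
    bn.bias.normal_()

x = torch.randn(2, n, 4, 4)
scale = bn.weight * (bn.running_var + 1e-5).rsqrt()
bias = bn.bias - bn.running_mean * scale
folded = x * scale.reshape(1, -1, 1, 1) + bias.reshape(1, -1, 1, 1)
print(torch.allclose(folded, bn(x), atol=1e-5))  # True
```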
1,349
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrLearnedPositionEmbedding
from torch import Tensor, nn
import torch


class ConditionalDetrLearnedPositionEmbedding(nn.Module):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, embedding_dim=256):
        super().__init__()
        self.row_embeddings = nn.Embedding(50, embedding_dim)
        self.column_embeddings = nn.Embedding(50, embedding_dim)

    def forward(self, pixel_values, pixel_mask=None):
        height, width = pixel_values.shape[-2:]
        width_values = torch.arange(width, device=pixel_values.device)
        height_values = torch.arange(height, device=pixel_values.device)
        x_emb = self.column_embeddings(width_values)
        y_emb = self.row_embeddings(height_values)
        pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1)
        pos = pos.permute(2, 0, 1)
        pos = pos.unsqueeze(0)
        pos = pos.repeat(pixel_values.shape[0], 1, 1, 1)
        return pos
class ConditionalDetrLearnedPositionEmbedding(nn.Module):
    '''
    This module learns positional embeddings up to a fixed maximum size.
    '''

    def __init__(self, embedding_dim=256):
        pass

    def forward(self, pixel_values, pixel_mask=None):
        pass
3
1
8
0
8
0
1
0.19
1
1
0
0
2
2
2
12
21
2
16
11
13
3
16
11
13
1
1
0
2
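The learned embedding above encodes rows and columns separately (50 positions each) and concatenates them channel-wise, so the output has `2 * embedding_dim` channels. A shape walk-through under the record's defaults (toy sizes, standalone tensors):

```python
import torch
from torch import nn

embedding_dim = 256
row = nn.Embedding(50, embedding_dim)
col = nn.Embedding(50, embedding_dim)

height, width, batch = 25, 34, 2
x_emb = col(torch.arange(width))               # (width, 256)
y_emb = row(torch.arange(height))              # (height, 256)
pos = torch.cat(
    [x_emb.unsqueeze(0).repeat(height, 1, 1),  # (height, width, 256)
     y_emb.unsqueeze(1).repeat(1, width, 1)],  # (height, width, 256)
    dim=-1,
).permute(2, 0, 1).unsqueeze(0).repeat(batch, 1, 1, 1)
print(pos.shape)  # torch.Size([2, 512, 25, 34])
```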
1,350
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrMHAttentionMap
from torch import Tensor, nn
import torch
from typing import Optional, Union


class ConditionalDetrMHAttentionMap(nn.Module):
    """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""

    def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None):
        super().__init__()
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.dropout = nn.Dropout(dropout)
        self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
        self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
        self.normalize_fact = float(hidden_dim / self.num_heads) ** (-0.5)

    def forward(self, q, k, mask: Optional[Tensor]=None):
        q = self.q_linear(q)
        # apply the key projection as a 1x1 convolution over the feature map
        k = nn.functional.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias)
        queries_per_head = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads)
        keys_per_head = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1])
        weights = torch.einsum('bqnc,bnchw->bqnhw', queries_per_head * self.normalize_fact, keys_per_head)
        if mask is not None:
            weights = weights.masked_fill(mask.unsqueeze(1).unsqueeze(1), torch.finfo(weights.dtype).min)
        weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size())
        weights = self.dropout(weights)
        return weights
class ConditionalDetrMHAttentionMap(nn.Module):
    '''This is a 2D attention module, which only returns the attention softmax (no multiplication by value)'''

    def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None):
        pass

    def forward(self, q, k, mask: Optional[Tensor]=None):
        pass
3
1
11
2
10
0
2
0.05
1
3
0
0
2
6
2
12
26
5
20
12
17
1
20
12
17
2
1
1
3
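The einsum in `ConditionalDetrMHAttentionMap` contracts the per-head channel dimension, leaving one spatial attention map per (query, head) pair; the softmax then runs over the flattened trailing dimensions. A shape check with toy sizes (standalone sketch):

```python
import torch

batch, queries, heads, ch, h, w = 2, 300, 8, 32, 25, 34
q = torch.randn(batch, queries, heads, ch)  # projected queries, split per head
k = torch.randn(batch, heads, ch, h, w)     # projected keys, per head and pixel
weights = torch.einsum("bqnc,bnchw->bqnhw", q, k)
print(weights.shape)  # torch.Size([2, 300, 8, 25, 34])

# as in the record: softmax over the flattened (heads, height, width) dims
weights = torch.softmax(weights.flatten(2), dim=-1).view(weights.size())
```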
1,351
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrMLPPredictionHead
from torch import Tensor, nn


class ConditionalDetrMLPPredictionHead(nn.Module):
    """
    Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
    height and width of a bounding box w.r.t. an image.

    Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x
class ConditionalDetrMLPPredictionHead(nn.Module):
    '''
    Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
    height and width of a bounding box w.r.t. an image.

    Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
    '''

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        pass

    def forward(self, x):
        pass
3
1
5
0
5
0
2
0.5
1
3
0
0
2
2
2
12
19
4
10
7
7
5
10
7
7
3
1
1
4
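Usage sketch for the prediction head above (assumes the class from this record is in scope): a 3-layer MLP maps decoder outputs of width `d_model` to 4 raw box parameters, with ReLU on every layer except the last; callers typically apply `sigmoid` afterwards to get normalized boxes.

```python
import torch

head = ConditionalDetrMLPPredictionHead(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
queries = torch.randn(2, 300, 256)  # (batch, num_queries, d_model)
boxes = head(queries).sigmoid()     # normalized (cx, cy, w, h)
print(boxes.shape)                  # torch.Size([2, 300, 4])
```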
1,352
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrMaskHeadSmallConv
import torch
from torch import Tensor, nn


class ConditionalDetrMaskHeadSmallConv(nn.Module):
    """
    Simple convolutional head, using group norm. Upsampling is done using a FPN approach
    """

    def __init__(self, dim, fpn_dims, context_dim):
        super().__init__()
        if dim % 8 != 0:
            raise ValueError('The hidden_size + number of attention heads must be divisible by 8 as the number of groups in GroupNorm is set to 8')
        inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64]
        self.lay1 = nn.Conv2d(dim, dim, 3, padding=1)
        self.gn1 = nn.GroupNorm(8, dim)
        self.lay2 = nn.Conv2d(dim, inter_dims[1], 3, padding=1)
        self.gn2 = nn.GroupNorm(min(8, inter_dims[1]), inter_dims[1])
        self.lay3 = nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
        self.gn3 = nn.GroupNorm(min(8, inter_dims[2]), inter_dims[2])
        self.lay4 = nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
        self.gn4 = nn.GroupNorm(min(8, inter_dims[3]), inter_dims[3])
        self.lay5 = nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
        self.gn5 = nn.GroupNorm(min(8, inter_dims[4]), inter_dims[4])
        self.out_lay = nn.Conv2d(inter_dims[4], 1, 3, padding=1)
        self.dim = dim
        self.adapter1 = nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
        self.adapter2 = nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
        self.adapter3 = nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x: Tensor, bbox_mask: Tensor, fpns: list[Tensor]):
        # concatenate the projected feature map (repeated per query) with the attention maps
        x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)
        x = self.lay1(x)
        x = self.gn1(x)
        x = nn.functional.relu(x)
        x = self.lay2(x)
        x = self.gn2(x)
        x = nn.functional.relu(x)
        # FPN stage 1: add adapted backbone features, upsampling x to match
        cur_fpn = self.adapter1(fpns[0])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
        x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
        x = self.lay3(x)
        x = self.gn3(x)
        x = nn.functional.relu(x)
        # FPN stage 2
        cur_fpn = self.adapter2(fpns[1])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
        x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
        x = self.lay4(x)
        x = self.gn4(x)
        x = nn.functional.relu(x)
        # FPN stage 3
        cur_fpn = self.adapter3(fpns[2])
        if cur_fpn.size(0) != x.size(0):
            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
        x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
        x = self.lay5(x)
        x = self.gn5(x)
        x = nn.functional.relu(x)
        x = self.out_lay(x)
        return x
class ConditionalDetrMaskHeadSmallConv(nn.Module):
    '''
    Simple convolutional head, using group norm. Upsampling is done using a FPN approach
    '''

    def __init__(self, dim, fpn_dims, context_dim):
        pass

    def forward(self, x: Tensor, bbox_mask: Tensor, fpns: list[Tensor]):
        pass
3
1
36
6
29
2
4
0.1
1
3
0
0
2
15
2
12
78
13
59
21
56
6
56
21
53
4
1
2
8
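The mask head above calls `_expand`, which is not defined in this record. The DETR-family helper repeats each image's tensor once per query and folds the query axis into the batch axis; the definition below follows that convention as an assumption, not a copy from the file:

```python
import torch

def _expand(tensor: torch.Tensor, length: int) -> torch.Tensor:
    # (batch, C, H, W) -> (batch * length, C, H, W): one copy per query
    return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)

x = torch.randn(2, 256, 25, 34)  # projected feature map, one per image
print(_expand(x, 300).shape)     # torch.Size([600, 256, 25, 34])
```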
1,353
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrModel
from typing import Optional, Union import torch from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput from torch import Tensor, nn from .configuration_conditional_detr import ConditionalDetrConfig @auto_docstring(custom_intro='\n The bare Conditional DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw\n hidden-states without any specific head on top.\n ') class ConditionalDetrModel(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) backbone = ConditionalDetrConvEncoder(config) object_queries = build_position_encoding(config) self.backbone = ConditionalDetrConvModel(backbone, object_queries) self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1) self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model) self.encoder = ConditionalDetrEncoder(config) self.decoder = ConditionalDetrDecoder(config) self.post_init() def get_encoder(self): return self.encoder def freeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) @auto_docstring def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.FloatTensor], ConditionalDetrModelOutput]: """ decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. 
Examples: ```python >>> from transformers import AutoImageProcessor, AutoModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> model = AutoModel.from_pretrained("microsoft/conditional-detr-resnet-50") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # the last hidden states are the final query embeddings of the Transformer decoder >>> # these are of shape (batch_size, num_queries, hidden_size) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 300, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones((batch_size, height, width), device=device) features, object_queries_list = self.backbone(pixel_values, pixel_mask) feature_map, mask = features[-1] if mask is None: raise ValueError('Backbone does not return downsampled pixel mask') projected_feature_map = self.input_projection(feature_map) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1) flattened_mask = mask.flatten(1) if encoder_outputs is None: encoder_outputs = self.encoder(inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)): encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None) query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1) queries = torch.zeros_like(query_position_embeddings) decoder_outputs = self.decoder(inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) if not return_dict: return decoder_outputs + encoder_outputs return ConditionalDetrModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, reference_points=decoder_outputs.reference_points)
null
8
1
25
4
15
6
3
0.4
1
10
7
0
6
5
6
7
160
30
93
39
73
37
46
27
39
11
2
1
18
1,354
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrObjectDetectionOutput
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
import torch
from dataclasses import dataclass


@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`ConditionalDetrForObjectDetection`].
    """
)
class ConditionalDetrObjectDetectionOutput(ModelOutput):
    """
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
        Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
        bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
        scale-invariant IoU loss.
    loss_dict (`Dict`, *optional*):
        A dictionary containing the individual losses. Useful for logging.
    logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
        Classification logits (including no-object) for all queries.
    pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values
        are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible
        padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the
        unnormalized bounding boxes.
    auxiliary_outputs (`list[Dict]`, *optional*):
        Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
        and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
        `pred_boxes`) for each decoder layer.
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
        Sequence of hidden-states at the output of the last layer of the decoder of the model.
    """

    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[dict] = None
    logits: Optional[torch.FloatTensor] = None
    pred_boxes: Optional[torch.FloatTensor] = None
    auxiliary_outputs: Optional[list[dict]] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`ConditionalDetrForObjectDetection`].
    """
)
class ConditionalDetrObjectDetectionOutput(ModelOutput):
    '''
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
        Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
        bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
        scale-invariant IoU loss.
    loss_dict (`Dict`, *optional*):
        A dictionary containing the individual losses. Useful for logging.
    logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
        Classification logits (including no-object) for all queries.
    pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values
        are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible
        padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the
        unnormalized bounding boxes.
    auxiliary_outputs (`list[Dict]`, *optional*):
        Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
        and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
        `pred_boxes`) for each decoder layer.
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
        Sequence of hidden-states at the output of the last layer of the decoder of the model.
    '''
    pass
3
1
0
0
0
0
0
3.46
1
0
0
0
0
0
0
0
60
2
13
13
12
45
13
13
12
0
1
0
0
1,355
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrPreTrainedModel
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
from .configuration_conditional_detr import ConditionalDetrConfig
from ...modeling_utils import PreTrainedModel
from torch import Tensor, nn


@auto_docstring
class ConditionalDetrPreTrainedModel(PreTrainedModel):
    config: ConditionalDetrConfig
    base_model_prefix = 'model'
    main_input_name = 'pixel_values'
    _no_split_modules = ['ConditionalDetrConvEncoder', 'ConditionalDetrEncoderLayer', 'ConditionalDetrDecoderLayer']

    def _init_weights(self, module):
        std = self.config.init_std
        xavier_std = self.config.init_xavier_std
        if isinstance(module, ConditionalDetrMHAttentionMap):
            nn.init.zeros_(module.k_linear.bias)
            nn.init.zeros_(module.q_linear.bias)
            nn.init.xavier_uniform_(module.k_linear.weight, gain=xavier_std)
            nn.init.xavier_uniform_(module.q_linear.weight, gain=xavier_std)
        elif isinstance(module, ConditionalDetrLearnedPositionEmbedding):
            nn.init.uniform_(module.row_embeddings.weight)
            nn.init.uniform_(module.column_embeddings.weight)
        if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
@auto_docstring
class ConditionalDetrPreTrainedModel(PreTrainedModel):
    def _init_weights(self, module):
        pass
3
0
22
1
19
2
7
0.08
1
2
2
5
1
0
1
1
28
2
24
8
22
2
22
8
20
7
1
2
7
1,356
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrSegmentationOutput
from ...utils import ModelOutput, auto_docstring, is_timm_available, logging, requires_backends
import torch
from typing import Optional, Union
from dataclasses import dataclass


@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`ConditionalDetrForSegmentation`].
    """
)
class ConditionalDetrSegmentationOutput(ModelOutput):
    """
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
        Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
        bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
        scale-invariant IoU loss.
    loss_dict (`Dict`, *optional*):
        A dictionary containing the individual losses. Useful for logging.
    logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
        Classification logits (including no-object) for all queries.
    pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values
        are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible
        padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the
        unnormalized bounding boxes.
    pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height/4, width/4)`):
        Segmentation masks logits for all queries. See also
        [`~ConditionalDetrImageProcessor.post_process_semantic_segmentation`] or
        [`~ConditionalDetrImageProcessor.post_process_instance_segmentation`] or
        [`~ConditionalDetrImageProcessor.post_process_panoptic_segmentation`] to evaluate semantic, instance and
        panoptic segmentation masks respectively.
    auxiliary_outputs (`list[Dict]`, *optional*):
        Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
        and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
        `pred_boxes`) for each decoder layer.
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
        Sequence of hidden-states at the output of the last layer of the decoder of the model.
    """

    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[dict] = None
    logits: Optional[torch.FloatTensor] = None
    pred_boxes: Optional[torch.FloatTensor] = None
    pred_masks: Optional[torch.FloatTensor] = None
    auxiliary_outputs: Optional[list[dict]] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`ConditionalDetrForSegmentation`].
    """
)
class ConditionalDetrSegmentationOutput(ModelOutput):
    '''
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
        Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
        bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
        scale-invariant IoU loss.
    loss_dict (`Dict`, *optional*):
        A dictionary containing the individual losses. Useful for logging.
    logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
        Classification logits (including no-object) for all queries.
    pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
        Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values
        are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible
        padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the
        unnormalized bounding boxes.
    pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height/4, width/4)`):
        Segmentation masks logits for all queries. See also
        [`~ConditionalDetrImageProcessor.post_process_semantic_segmentation`] or
        [`~ConditionalDetrImageProcessor.post_process_instance_segmentation`] or
        [`~ConditionalDetrImageProcessor.post_process_panoptic_segmentation`] to evaluate semantic, instance and
        panoptic segmentation masks respectively.
    auxiliary_outputs (`list[Dict]`, *optional*):
        Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
        and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
        `pred_boxes`) for each decoder layer.
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
        Sequence of hidden-states at the output of the last layer of the decoder of the model.
    '''
    pass
3
1
0
0
0
0
0
3.64
1
0
0
0
0
0
0
0
67
2
14
14
13
51
14
14
13
0
1
0
0
1,357
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrSinePositionEmbedding
import math
from torch import Tensor, nn
import torch


class ConditionalDetrSinePositionEmbedding(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one used by the Attention is all
    you need paper, generalized to work on images.
    """

    def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError('normalize should be True if scale is passed')
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, pixel_values, pixel_mask):
        if pixel_mask is None:
            raise ValueError('No pixel mask provided')
        y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
        x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            y_embed = y_embed / (y_embed[:, -1:, :] + 1e-06) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + 1e-06) * self.scale
        dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float()
        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.embedding_dim)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos
class ConditionalDetrSinePositionEmbedding(nn.Module):
    '''
    This is a more standard version of the position embedding, very similar to the one used by the Attention is all
    you need paper, generalized to work on images.
    '''

    def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
        pass

    def forward(self, pixel_values, pixel_mask):
        pass
3
1
14
1
13
0
3
0.15
1
2
0
0
2
4
2
12
35
4
27
13
24
4
27
13
24
3
1
1
6
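A numeric walk-through of the sine embedding above on a tiny all-valid mask: cumulative sums over the mask give 1-based pixel coordinates, which are then encoded with interleaved sin/cos at geometrically spaced frequencies (standalone sketch, `normalize=False` path only):

```python
import torch

embedding_dim, temperature = 64, 10000
pixel_mask = torch.ones(1, 4, 6)                     # (batch, height, width)

y_embed = pixel_mask.cumsum(1, dtype=torch.float32)  # row coordinate, 1-based
x_embed = pixel_mask.cumsum(2, dtype=torch.float32)  # column coordinate, 1-based

dim_t = torch.arange(embedding_dim).float()
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / embedding_dim)

pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
print(pos.shape)  # torch.Size([1, 128, 4, 6]) -> 2 * embedding_dim channels
```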
1,358
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/conditional_detr/modeling_conditional_detr.py
transformers.models.conditional_detr.modeling_conditional_detr.MLP
from torch import Tensor, nn


class MLP(nn.Module):
    """
    Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
    height and width of a bounding box w.r.t. an image.

    Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x
class MLP(nn.Module):
    '''
    Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
    height and width of a bounding box w.r.t. an image.

    Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
    '''

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        pass

    def forward(self, x):
        pass
3
1
5
0
5
0
2
0.5
1
3
0
0
2
2
2
12
19
4
10
7
7
5
10
7
7
3
1
1
4
1,359
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/configuration_convbert.py
transformers.models.convbert.configuration_convbert.ConvBertConfig
from ...configuration_utils import PretrainedConfig


class ConvBertConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`ConvBertModel`]. It is used to instantiate a
    ConvBERT model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the ConvBERT
    [YituTech/conv-bert-base](https://huggingface.co/YituTech/conv-bert-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the ConvBERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`ConvBertModel`] or [`TFConvBertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`ConvBertModel`] or [`TFConvBertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        head_ratio (`int`, *optional*, defaults to 2):
            Ratio gamma to reduce the number of attention heads.
        num_groups (`int`, *optional*, defaults to 1):
            The number of groups for grouped linear layers in the ConvBERT model.
        conv_kernel_size (`int`, *optional*, defaults to 9):
            The size of the convolutional kernel.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.

    Example:

    ```python
    >>> from transformers import ConvBertConfig, ConvBertModel

    >>> # Initializing a ConvBERT convbert-base-uncased style configuration
    >>> configuration = ConvBertConfig()

    >>> # Initializing a model (with random weights) from the convbert-base-uncased style configuration
    >>> model = ConvBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = 'convbert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`ConvBertModel`]. It is used to instantiate a ConvBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ConvBERT [YituTech/conv-bert-base](https://huggingface.co/YituTech/conv-bert-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the ConvBERT model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`ConvBertModel`] or [`TFConvBertModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`ConvBertModel`] or [`TFConvBertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. head_ratio (`int`, *optional*, defaults to 2): Ratio gamma to reduce the number of attention heads. num_groups (`int`, *optional*, defaults to 1): The number of groups for grouped linear layers in the ConvBERT model. conv_kernel_size (`int`, *optional*, defaults to 9): The size of the convolutional kernel. classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. 
Example: ```python >>> from transformers import ConvBertConfig, ConvBertModel >>> # Initializing a ConvBERT convbert-base-uncased style configuration >>> configuration = ConvBertConfig() >>> # Initializing a model (with random weights) from the convbert-base-uncased style configuration >>> model = ConvBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs): pass
2
1
48
1
47
0
1
1.08
1
1
0
0
1
17
1
1
113
11
49
43
24
53
21
20
19
1
1
0
1
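A short usage sketch for the configuration record above. It uses only the public `transformers` API already named in the docstring; the non-default values (`head_ratio=4`, `conv_kernel_size=17`, `num_groups=2`) are chosen purely for illustration and are not from the source file:

```python
from transformers import ConvBertConfig, ConvBertModel

# Illustrative non-default values: head_ratio=4 keeps 12 // 4 = 3 mixed-attention
# heads, and conv_kernel_size widens the span-based convolution.
configuration = ConvBertConfig(head_ratio=4, conv_kernel_size=17, num_groups=2)
model = ConvBertModel(configuration)  # randomly initialized weights

# The configuration round-trips through the model.
assert model.config.conv_kernel_size == 17
```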
1,360
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/configuration_convbert.py
transformers.models.convbert.configuration_convbert.ConvBertOnnxConfig
from collections.abc import Mapping from collections import OrderedDict from ...onnx import OnnxConfig class ConvBertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == 'multiple-choice': dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'} else: dynamic_axis = {0: 'batch', 1: 'sequence'} return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
class ConvBertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass
3
0
12
0
12
0
2
0
1
3
0
0
1
0
1
1
14
0
14
4
11
0
6
3
4
2
1
1
2
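A standalone sketch of the axis mapping the `inputs` property in the record above returns; the helper function name is hypothetical, but the two task branches mirror the class body verbatim:

```python
from collections import OrderedDict

def convbert_onnx_inputs(task: str = "default"):
    # Mirrors ConvBertOnnxConfig.inputs: multiple-choice inputs carry an
    # extra 'choice' axis between batch and sequence.
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
    )

print(convbert_onnx_inputs("multiple-choice"))
```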
1,361
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertAttention
import torch from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from torch import nn from typing import Callable, Optional, Union class ConvBertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = ConvBertSelfAttention(config) self.output = ConvBertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.FloatTensor]]: self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs
class ConvBertAttention(nn.Module): def __init__(self, config): pass def prune_heads(self, heads): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.FloatTensor]]: pass
4
0
13
1
12
1
1
0.08
1
6
2
0
3
3
3
13
43
4
37
18
26
3
22
11
18
2
1
1
4
1,362
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertClassificationHead
from torch import nn from ...activations import ACT2FN, get_activation import torch class ConvBertClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor: x = hidden_states[:, 0, :] x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x
class ConvBertClassificationHead(nn.Module): '''Head for sentence-level classification tasks.''' def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor: pass
3
1
9
1
9
1
2
0.11
1
2
0
0
2
4
2
12
22
3
18
9
15
2
16
9
13
2
1
0
3
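A minimal shape sketch for the classification head above, importing it from the module path given in this record's file_path; the batch/sequence sizes are arbitrary:

```python
import torch
from transformers import ConvBertConfig
from transformers.models.convbert.modeling_convbert import ConvBertClassificationHead

config = ConvBertConfig(num_labels=3)          # num_labels is a PretrainedConfig kwarg
head = ConvBertClassificationHead(config)

hidden_states = torch.randn(2, 16, config.hidden_size)  # (batch, seq, hidden)
logits = head(hidden_states)                   # only the first ([CLS]) position is used
assert logits.shape == (2, 3)
```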
1,363
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertEmbeddings
import torch from torch import nn from typing import Callable, Optional, Union class ConvBertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size) self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.LongTensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings
class ConvBertEmbeddings(nn.Module): '''Construct the embeddings from word, position and token_type embeddings.''' def __init__(self, config): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.LongTensor: pass
3
1
27
3
21
3
4
0.16
1
1
0
0
2
5
2
12
58
8
43
21
34
7
31
15
28
6
1
2
7
1,364
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertEncoder
from typing import Callable, Optional, Union from torch import nn import torch from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput class ConvBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ConvBertLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutputWithCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None)) return BaseModelOutputWithCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
class ConvBertEncoder(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutputWithCrossAttentions]: pass
3
0
33
2
31
0
7
0
1
8
2
0
2
3
2
12
67
5
62
22
49
0
27
12
24
12
1
3
13
1,365
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertForMaskedLM
from ...utils import auto_docstring, logging from torch import nn from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch from typing import Callable, Optional, Union @auto_docstring class ConvBertForMaskedLM(ConvBertPreTrainedModel): _tied_weights_keys = ['generator.lm_head.weight'] def __init__(self, config): super().__init__(config) self.convbert = ConvBertModel(config) self.generator_predictions = ConvBertGeneratorPredictions(config) self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size) self.post_init() def get_output_embeddings(self): return self.generator_lm_head def set_output_embeddings(self, word_embeddings): self.generator_lm_head = word_embeddings @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict generator_hidden_states = self.convbert(input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict) generator_sequence_output = generator_hidden_states[0] prediction_scores = self.generator_predictions(generator_sequence_output) prediction_scores = self.generator_lm_head(prediction_scores) loss = None if labels is not None: loss_fct = nn.CrossEntropyLoss() loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + generator_hidden_states[1:] return (loss,) + output if loss is not None else output return MaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_states, attentions=generator_hidden_states.attentions)
@auto_docstring class ConvBertForMaskedLM(ConvBertPreTrainedModel): def __init__(self, config): pass def get_output_embeddings(self): pass def set_output_embeddings(self, word_embeddings): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` ''' pass
7
1
17
2
13
2
2
0.15
1
5
3
0
4
3
4
5
78
11
59
28
36
9
26
15
21
5
2
1
8
1,366
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertForMultipleChoice
from typing import Callable, Optional, Union from ...utils import auto_docstring, logging from torch import nn from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import torch @auto_docstring class ConvBertForMultipleChoice(ConvBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.convbert = ConvBertModel(config) self.sequence_summary = ConvBertSequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.convbert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] pooled_output = self.sequence_summary(sequence_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class ConvBertForMultipleChoice(ConvBertPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) ''' pass
5
1
37
5
29
4
6
0.1
1
4
2
0
2
3
2
3
84
10
67
30
44
7
28
15
25
11
2
1
12
1,367
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertForQuestionAnswering
from torch import nn from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput import torch from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...utils import auto_docstring, logging from typing import Callable, Optional, Union @auto_docstring class ConvBertForQuestionAnswering(ConvBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.convbert = ConvBertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.convbert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class ConvBertForQuestionAnswering(ConvBertPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]: pass
5
0
41
5
30
7
4
0.19
1
4
2
0
2
3
2
3
90
10
67
30
45
13
32
16
29
7
2
2
8
1,368
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertForSequenceClassification
import torch from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from typing import Callable, Optional, Union from ...utils import auto_docstring, logging from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @auto_docstring(custom_intro='\n ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ') class ConvBertForSequenceClassification(ConvBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.convbert = ConvBertModel(config) self.classifier = ConvBertClassificationHead(config) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.convbert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ') class ConvBertForSequenceClassification(ConvBertPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
5
1
39
4
32
4
7
0.1
1
6
3
0
2
4
2
3
86
8
71
26
50
7
33
13
30
12
2
3
13
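The `problem_type` dispatch in the forward above can be summarized as a small standalone function; this is a sketch of the same logic in plain `torch`, not the library call itself, and the function name is hypothetical:

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def classification_loss(logits, labels, problem_type, num_labels):
    # Sketch of the problem_type branches in ConvBertForSequenceClassification.forward.
    if problem_type == "regression":
        loss_fct = MSELoss()
        return loss_fct(logits.squeeze(), labels.squeeze()) if num_labels == 1 else loss_fct(logits, labels)
    if problem_type == "single_label_classification":
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels)  # multi_label_classification

logits = torch.randn(4, 3)
labels = torch.randint(0, 3, (4,))
print(classification_loss(logits, labels, "single_label_classification", 3))
```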
1,369
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertForTokenClassification
import torch from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from typing import Callable, Optional, Union from ...utils import auto_docstring, logging from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn @auto_docstring class ConvBertForTokenClassification(ConvBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.convbert = ConvBertModel(config) classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.convbert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class ConvBertForTokenClassification(ConvBertPreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. ''' pass
5
1
32
4
26
3
4
0.09
1
4
2
0
2
4
2
3
72
9
58
27
37
5
23
14
20
5
2
1
7
1,370
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertGeneratorPredictions
import torch from torch import nn from ...activations import ACT2FN, get_activation class ConvBertGeneratorPredictions(nn.Module): """Prediction module for the generator, made up of two dense layers.""" def __init__(self, config): super().__init__() self.activation = get_activation('gelu') self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) self.dense = nn.Linear(config.hidden_size, config.embedding_size) def forward(self, generator_hidden_states: torch.FloatTensor) -> torch.FloatTensor: hidden_states = self.dense(generator_hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states
class ConvBertGeneratorPredictions(nn.Module): '''Prediction module for the generator, made up of two dense layers.''' def __init__(self, config): pass def forward(self, generator_hidden_states: torch.FloatTensor) -> torch.FloatTensor: pass
3
1
6
1
5
0
1
0.09
1
1
0
0
2
3
2
12
16
4
11
7
8
1
11
7
8
1
1
0
2
1,371
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertIntermediate
import torch from ...activations import ACT2FN, get_activation from torch import nn class ConvBertIntermediate(nn.Module): def __init__(self, config): super().__init__() if config.num_groups == 1: self.dense = nn.Linear(config.hidden_size, config.intermediate_size) else: self.dense = GroupedLinearLayer(input_size=config.hidden_size, output_size=config.intermediate_size, num_groups=config.num_groups) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class ConvBertIntermediate(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
8
0
8
0
2
0
1
4
1
0
2
2
2
12
18
1
17
5
14
0
13
5
10
3
1
1
4
1,372
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertLayer
import torch from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from typing import Callable, Optional, Union from ...modeling_layers import GradientCheckpointingLayer class ConvBertLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ConvBertAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise TypeError(f'{self} should be used as a decoder model if cross attention is added') self.crossattention = ConvBertAttention(config) self.intermediate = ConvBertIntermediate(config) self.output = ConvBertOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.FloatTensor]]: self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, 'crossattention'): raise AttributeError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`') cross_attention_outputs = self.crossattention(attention_output, encoder_attention_mask, head_mask, encoder_hidden_states, output_attentions) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:] layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class ConvBertLayer(GradientCheckpointingLayer): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.FloatTensor]]: pass def feed_forward_chunk(self, attention_output): pass
4
0
19
1
18
1
2
0.04
1
8
3
0
3
8
3
13
59
4
55
27
43
2
31
19
27
3
1
2
7
1,373
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertModel
from torch import nn from ...utils import auto_docstring, logging import torch from typing import Callable, Optional, Union from ...modeling_outputs import BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput @auto_docstring class ConvBertModel(ConvBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = ConvBertEmbeddings(config) if config.embedding_size != config.hidden_size: self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size) self.encoder = ConvBertEncoder(config) self.config = config self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) if hasattr(self, 'embeddings_project'): hidden_states = self.embeddings_project(hidden_states) hidden_states = self.encoder(hidden_states, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) return hidden_states
@auto_docstring class ConvBertModel(ConvBertPreTrainedModel): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithCrossAttentions]: pass
8
1
17
2
14
1
4
0.07
1
6
3
0
5
4
5
6
94
14
75
30
52
5
43
18
37
12
2
2
18
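An end-to-end sketch for the base model record above, using only the public API; `embedding_size=128` is chosen to exercise the `embeddings_project` branch in the forward:

```python
import torch
from transformers import ConvBertConfig, ConvBertModel

# embedding_size != hidden_size, so the embeddings_project linear layer is created.
config = ConvBertConfig(embedding_size=128)
model = ConvBertModel(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 12))
with torch.no_grad():
    outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 12, 768])
```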
1,374
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertOutput
import torch from torch import nn class ConvBertOutput(nn.Module): def __init__(self, config): super().__init__() if config.num_groups == 1: self.dense = nn.Linear(config.intermediate_size, config.hidden_size) else: self.dense = GroupedLinearLayer(input_size=config.intermediate_size, output_size=config.hidden_size, num_groups=config.num_groups) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class ConvBertOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
8
0
8
0
2
0
1
3
1
0
2
3
2
12
17
1
16
6
13
0
13
6
10
2
1
1
3
1,375
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertPreTrainedModel
from ...modeling_utils import PreTrainedModel from torch import nn from .configuration_convbert import ConvBertConfig from ...utils import auto_docstring, logging @auto_docstring class ConvBertPreTrainedModel(PreTrainedModel): config: ConvBertConfig base_model_prefix = 'convbert' supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, SeparableConv1D): module.bias.data.zero_() elif isinstance(module, GroupedLinearLayer): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) module.bias.data.zero_()
@auto_docstring class ConvBertPreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights''' pass
3
1
15
0
12
3
6
0.41
1
0
0
6
1
0
1
1
26
2
17
6
15
7
15
6
13
6
1
2
6
1,376
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertPredictionHeadTransform
from ...activations import ACT2FN, get_activation import torch from torch import nn class ConvBertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states
class ConvBertPredictionHeadTransform(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
7
0
7
0
2
0
1
3
0
0
2
3
2
12
15
1
14
6
11
0
13
6
10
2
1
1
3
1,377
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertSelfAttention
import torch from typing import Callable, Optional, Union import math from torch import nn class ConvBertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') new_num_attention_heads = config.num_attention_heads // config.head_ratio if new_num_attention_heads < 1: self.head_ratio = config.num_attention_heads self.num_attention_heads = 1 else: self.num_attention_heads = new_num_attention_heads self.head_ratio = config.head_ratio self.conv_kernel_size = config.conv_kernel_size if config.hidden_size % self.num_attention_heads != 0: raise ValueError('hidden_size should be divisible by num_attention_heads') self.attention_head_size = config.hidden_size // self.num_attention_heads // 2 self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.key_conv_attn_layer = SeparableConv1D(config, config.hidden_size, self.all_head_size, self.conv_kernel_size) self.conv_kernel_layer = nn.Linear(self.all_head_size, self.num_attention_heads * self.conv_kernel_size) self.conv_out_layer = nn.Linear(config.hidden_size, self.all_head_size) self.unfold = nn.Unfold(kernel_size=[self.conv_kernel_size, 1], padding=[int((self.conv_kernel_size - 1) / 2), 0]) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]: batch_size, seq_length, _ = hidden_states.shape if encoder_hidden_states is not None: mixed_key_layer = self.key(encoder_hidden_states) mixed_value_layer = self.value(encoder_hidden_states) else: mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states.transpose(1, 2)) mixed_key_conv_attn_layer = mixed_key_conv_attn_layer.transpose(1, 2) mixed_query_layer = self.query(hidden_states) query_layer = mixed_query_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) key_layer = mixed_key_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) value_layer = mixed_value_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) conv_attn_layer = torch.multiply(mixed_key_conv_attn_layer, mixed_query_layer) conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer) conv_kernel_layer = torch.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1]) conv_kernel_layer = torch.softmax(conv_kernel_layer, dim=1) conv_out_layer = self.conv_out_layer(hidden_states) conv_out_layer = torch.reshape(conv_out_layer, [batch_size, -1, self.all_head_size]) conv_out_layer = conv_out_layer.transpose(1, 2).contiguous().unsqueeze(-1) conv_out_layer = nn.functional.unfold(conv_out_layer, kernel_size=[self.conv_kernel_size, 1], dilation=1, padding=[(self.conv_kernel_size - 1) // 2, 0], stride=1) conv_out_layer = conv_out_layer.transpose(1, 2).reshape(batch_size, -1, 
self.all_head_size, self.conv_kernel_size) conv_out_layer = torch.reshape(conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size]) conv_out_layer = torch.matmul(conv_out_layer, conv_kernel_layer) conv_out_layer = torch.reshape(conv_out_layer, [-1, self.all_head_size]) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() conv_out = torch.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]) context_layer = torch.cat([context_layer, conv_out], 2) new_context_layer_shape = context_layer.size()[:-2] + (self.num_attention_heads * self.attention_head_size * 2,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs
class ConvBertSelfAttention(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]: pass
3
0
41
6
31
3
3
0.11
1
6
1
0
3
13
3
13
126
21
95
43
84
10
69
36
65
5
1
1
10
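A shape sketch for the mixed-attention module above, importing it from this record's module path; it shows how `head_ratio` halves the self-attention heads and how the attention and convolution halves are concatenated back to `hidden_size`:

```python
import torch
from transformers import ConvBertConfig
from transformers.models.convbert.modeling_convbert import ConvBertSelfAttention

config = ConvBertConfig()          # 12 heads, head_ratio=2
attn = ConvBertSelfAttention(config).eval()
print(attn.num_attention_heads)    # 6: half the heads become convolution heads
print(attn.attention_head_size)    # 64: hidden_size // num_attention_heads // 2

(context,) = attn(torch.randn(2, 16, config.hidden_size))
print(context.shape)               # (2, 16, 768): attention and conv halves concatenated
```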
1,378
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.ConvBertSelfOutput
import torch from torch import nn class ConvBertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class ConvBertSelfOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
1,379
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.GroupedLinearLayer
from torch import nn import torch class GroupedLinearLayer(nn.Module): def __init__(self, input_size, output_size, num_groups): super().__init__() self.input_size = input_size self.output_size = output_size self.num_groups = num_groups self.group_in_dim = self.input_size // self.num_groups self.group_out_dim = self.output_size // self.num_groups self.weight = nn.Parameter(torch.empty(self.num_groups, self.group_in_dim, self.group_out_dim)) self.bias = nn.Parameter(torch.empty(output_size)) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size = list(hidden_states.size())[0] x = torch.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]) x = x.permute(1, 0, 2) x = torch.matmul(x, self.weight) x = x.permute(1, 0, 2) x = torch.reshape(x, [batch_size, -1, self.output_size]) x = x + self.bias return x
class GroupedLinearLayer(nn.Module):
    def __init__(self, input_size, output_size, num_groups):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
9
0
9
0
1
0
1
3
0
0
2
7
2
12
20
1
19
12
16
0
19
12
16
1
1
0
2
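The grouped projection above is effectively a block-diagonal linear map. A short sketch follows, assuming `GroupedLinearLayer` as defined in this record; the explicit init calls are added because the stored parameters are created with `torch.empty` and are otherwise uninitialized.

```python
import torch
from torch import nn

layer = GroupedLinearLayer(input_size=64, output_size=64, num_groups=4)
nn.init.normal_(layer.weight, std=0.02)  # weight shape: (4, 16, 16)
nn.init.zeros_(layer.bias)

x = torch.randn(2, 10, 64)  # (batch, seq, input_size)
y = layer(x)
print(y.shape)  # torch.Size([2, 10, 64])

# Parameter comparison against a dense nn.Linear of the same overall shape
grouped = sum(p.numel() for p in layer.parameters())  # 4*16*16 + 64 = 1088
dense = 64 * 64 + 64                                  # 4160
print(grouped, dense)
```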
1,380
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/modeling_convbert.py
transformers.models.convbert.modeling_convbert.SeparableConv1D
import torch
from torch import nn


class SeparableConv1D(nn.Module):
    """This class implements separable convolution, i.e. a depthwise and a pointwise layer"""

    def __init__(self, config, input_filters, output_filters, kernel_size, **kwargs):
        super().__init__()
        self.depthwise = nn.Conv1d(input_filters, input_filters, kernel_size=kernel_size, groups=input_filters, padding=kernel_size // 2, bias=False)
        self.pointwise = nn.Conv1d(input_filters, output_filters, kernel_size=1, bias=False)
        self.bias = nn.Parameter(torch.zeros(output_filters, 1))
        self.depthwise.weight.data.normal_(mean=0.0, std=config.initializer_range)
        self.pointwise.weight.data.normal_(mean=0.0, std=config.initializer_range)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self.depthwise(hidden_states)
        x = self.pointwise(x)
        x += self.bias
        return x
class SeparableConv1D(nn.Module):
    '''This class implements separable convolution, i.e. a depthwise and a pointwise layer'''

    def __init__(self, config, input_filters, output_filters, kernel_size, **kwargs):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
1
10
1
10
0
1
0.05
1
2
0
0
2
3
2
12
24
3
20
7
17
1
13
7
10
1
1
0
2
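A sketch of the parameter saving behind the depthwise + pointwise factorization above, assuming `SeparableConv1D` as defined in this record is in scope; `SimpleNamespace` replaces the real config, since only `initializer_range` is read.

```python
import torch
from types import SimpleNamespace

config = SimpleNamespace(initializer_range=0.02)

conv = SeparableConv1D(config, input_filters=64, output_filters=128, kernel_size=9)
x = torch.randn(2, 64, 50)  # (batch, channels, seq_len); padding=k//2 keeps seq_len
y = conv(x)
print(y.shape)  # torch.Size([2, 128, 50])

# Depthwise (64*9) + pointwise (64*128) weights vs. a full Conv1d (64*128*9)
separable = 64 * 9 + 64 * 128 + 128  # includes the explicit bias parameter
full = 64 * 128 * 9 + 128
print(separable, full)  # 8896 vs. 73856
```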
1,381
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/tokenization_convbert.py
transformers.models.convbert.tokenization_convbert.ConvBertTokenizer
import collections
import os
from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from typing import Optional


class ConvBertTokenizer(PreTrainedTokenizer):
    """
    Construct a ConvBERT tokenizer. Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        do_basic_tokenize (`bool`, *optional*, defaults to `True`):
            Whether or not to do basic tokenization before WordPiece.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original ConvBERT).
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
            Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
            extra spaces.
    """
    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google "
                "pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
        super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)

    @property
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text, split_special_tokens=False):
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens if not split_special_tokens else None):
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A ConvBERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
        return [1] + [0] * len(token_ids_0) + [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)
class ConvBertTokenizer(PreTrainedTokenizer):
    '''
    Construct a ConvBERT tokenizer. Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        do_basic_tokenize (`bool`, *optional*, defaults to `True`):
            Whether or not to do basic tokenization before WordPiece.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original ConvBERT).
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
            Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
            extra spaces.
    '''

    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs):
        pass

    @property
    def do_lower_case(self):
        pass

    @property
    def vocab_size(self):
        pass

    def get_vocab(self):
        pass

    def _tokenize(self, text, split_special_tokens=False):
        pass

    def _convert_token_to_id(self, token):
        '''Converts a token (str) into an id using the vocab.'''
        pass

    def _convert_id_to_token(self, index):
        '''Converts an index (integer) into a token (str) using the vocab.'''
        pass

    def convert_tokens_to_string(self, tokens):
        '''Converts a sequence of tokens (string) into a single string.'''
        pass

    def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
        '''
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A ConvBERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        '''
        pass

    def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
        '''
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        '''
        pass

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        pass
14
6
15
1
10
4
2
0.72
1
9
2
0
12
5
12
101
236
29
121
53
85
87
65
29
52
6
3
3
27
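A hedged end-to-end sketch for the tokenizer record above. It assumes the full `tokenization_convbert` module (including `load_vocab`, `BasicTokenizer`, and `WordpieceTokenizer`, which the class references but which are defined elsewhere in the file) is importable; the toy vocabulary is invented for illustration.

```python
import os
import tempfile

# Build a toy WordPiece vocabulary on disk (hypothetical tokens)
tokens = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "hello", "world", "##s"]
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("\n".join(tokens))
    vocab_path = f.name

tok = ConvBertTokenizer(vocab_path)
ids = tok.convert_tokens_to_ids(tok.tokenize("hello worlds"))
print(tok.build_inputs_with_special_tokens(ids))
# [2, 5, 6, 7, 3]  ->  [CLS] hello world ##s [SEP]
print(tok.get_special_tokens_mask(ids))
# [1, 0, 0, 0, 1]
os.remove(vocab_path)
```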
1,382
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convbert/tokenization_convbert_fast.py
transformers.models.convbert.tokenization_convbert_fast.ConvBertTokenizerFast
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from tokenizers import normalizers
from .tokenization_convbert import ConvBertTokenizer
from typing import Optional
import json


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        clean_text (`bool`, *optional*, defaults to `True`):
            Whether or not to clean the text before tokenization by removing any control characters and replacing all
            whitespaces by the classic one.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
            issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original ConvBERT).
        wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
            The prefix for subwords.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars:
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A ConvBERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    '''
    Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        clean_text (`bool`, *optional*, defaults to `True`):
            Whether or not to clean the text before tokenization by removing any control characters and replacing all
            whitespaces by the classic one.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
            issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original ConvBERT).
        wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
            The prefix for subwords.
    '''

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        pass

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A ConvBERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        '''
        pass

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
        pass
4
2
24
3
14
7
2
1.12
1
4
0
0
4
1
4
92
141
18
58
29
38
65
27
14
22
2
3
1
7
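The special-token layout implemented by `build_inputs_with_special_tokens` above, restated as a pure function; the ids 101/102 are conventional BERT-family values used only for illustration, not read from any checkpoint.

```python
def build_inputs(token_ids_0, token_ids_1=None, cls_id=101, sep_id=102):
    # [CLS] A [SEP] for single sequences, [CLS] A [SEP] B [SEP] for pairs
    output = [cls_id] + token_ids_0 + [sep_id]
    if token_ids_1 is not None:
        output += token_ids_1 + [sep_id]
    return output

print(build_inputs([7, 8, 9]))         # [101, 7, 8, 9, 102]
print(build_inputs([7, 8], [20, 21]))  # [101, 7, 8, 102, 20, 21, 102]
```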
1,383
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/configuration_convnext.py
transformers.models.convnext.configuration_convnext.ConvNextConfig
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
from ...configuration_utils import PretrainedConfig


class ConvNextConfig(BackboneConfigMixin, PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`ConvNextModel`]. It is used to instantiate a
    ConvNeXT model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the ConvNeXT
    [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        patch_size (`int`, *optional*, defaults to 4):
            Patch size to use in the patch embedding layer.
        num_stages (`int`, *optional*, defaults to 4):
            The number of stages in the model.
        hidden_sizes (`list[int]`, *optional*, defaults to [96, 192, 384, 768]):
            Dimensionality (hidden size) at each stage.
        depths (`list[int]`, *optional*, defaults to [3, 3, 9, 3]):
            Depth (number of blocks) for each stage.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        layer_scale_init_value (`float`, *optional*, defaults to 1e-6):
            The initial value for the layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop rate for stochastic depth.
        out_features (`list[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`list[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined
            in the `stage_names` attribute.

    Example:
    ```python
    >>> from transformers import ConvNextConfig, ConvNextModel

    >>> # Initializing a ConvNext convnext-tiny-224 style configuration
    >>> configuration = ConvNextConfig()

    >>> # Initializing a model (with random weights) from the convnext-tiny-224 style configuration
    >>> model = ConvNextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'convnext'

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-12, layer_scale_init_value=1e-06, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class ConvNextConfig(BackboneConfigMixin, PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`ConvNextModel`]. It is used to instantiate a
    ConvNeXT model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the ConvNeXT
    [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        patch_size (`int`, *optional*, defaults to 4):
            Patch size to use in the patch embedding layer.
        num_stages (`int`, *optional*, defaults to 4):
            The number of stages in the model.
        hidden_sizes (`list[int]`, *optional*, defaults to [96, 192, 384, 768]):
            Dimensionality (hidden size) at each stage.
        depths (`list[int]`, *optional*, defaults to [3, 3, 9, 3]):
            Depth (number of blocks) for each stage.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        layer_scale_init_value (`float`, *optional*, defaults to 1e-6):
            The initial value for the layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop rate for stochastic depth.
        out_features (`list[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`list[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined
            in the `stage_names` attribute.

    Example:
    ```python
    >>> from transformers import ConvNextConfig, ConvNextModel

    >>> # Initializing a ConvNext convnext-tiny-224 style configuration
    >>> configuration = ConvNextConfig()

    >>> # Initializing a model (with random weights) from the convnext-tiny-224 style configuration
    >>> model = ConvNextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-12, layer_scale_init_value=1e-06, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        pass
2
1
34
1
33
0
3
1.4
2
2
0
0
1
14
1
6
93
9
35
32
17
49
17
16
15
3
1
0
3
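A short customization sketch for the configuration class above, assuming `transformers` is installed; the stage sizes are illustrative, not the convnext-tiny defaults.

```python
from transformers import ConvNextConfig

# Hypothetical smaller variant with two backbone output stages requested
config = ConvNextConfig(hidden_sizes=[64, 128, 256, 512], depths=[2, 2, 6, 2], out_features=["stage2", "stage4"])
print(config.stage_names)      # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.hidden_sizes[1])  # 128
```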
1,384
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/configuration_convnext.py
transformers.models.convnext.configuration_convnext.ConvNextOnnxConfig
from collections.abc import Mapping
from collections import OrderedDict
from packaging import version
from ...onnx import OnnxConfig


class ConvNextOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-05
class ConvNextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        pass

    @property
    def atol_for_validation(self) -> float:
        pass
5
0
4
0
4
0
1
0
1
4
0
0
2
0
2
2
14
2
12
6
7
0
6
4
3
1
1
0
2
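A usage sketch for the ONNX config above, assuming this module path is still importable in the installed `transformers` version (ONNX configs have moved around across releases).

```python
from transformers import ConvNextConfig
from transformers.models.convnext.configuration_convnext import ConvNextOnnxConfig

onnx_config = ConvNextOnnxConfig(ConvNextConfig())
# All four axes of pixel_values are declared dynamic for export
print(onnx_config.inputs)
# OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
print(onnx_config.atol_for_validation)  # 1e-05
```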
1,385
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/feature_extraction_convnext.py
transformers.models.convnext.feature_extraction_convnext.ConvNextFeatureExtractor
import warnings
from ...utils.import_utils import requires
from .image_processing_convnext import ConvNextImageProcessor


@requires(backends=('vision',))
class ConvNextFeatureExtractor(ConvNextImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn('The class ConvNextFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use ConvNextImageProcessor instead.', FutureWarning)
        super().__init__(*args, **kwargs)
@requires(backends=('vision',))
class ConvNextFeatureExtractor(ConvNextImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        pass
3
0
7
0
7
0
1
0
1
2
0
0
1
0
1
24
8
0
8
2
6
0
4
2
2
1
4
0
1
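A sketch of what the deprecation shim above does at construction time, assuming the vision backend is available: it only emits a `FutureWarning` and then defers entirely to `ConvNextImageProcessor`.

```python
import warnings

from transformers import ConvNextFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = ConvNextFeatureExtractor()  # behaves like ConvNextImageProcessor

print(caught[0].category)  # <class 'FutureWarning'>
```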
1,386
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/image_processing_convnext.py
transformers.models.convnext.image_processing_convnext.ConvNextImageProcessor
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, get_resize_output_image_size, resize, to_channel_dimension_format
from ...utils.import_utils import requires
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments


@requires(backends=('vision',))
class ConvNextImageProcessor(BaseImageProcessor):
    """
    Constructs a ConvNeXT image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
            overridden by `do_resize` in the `preprocess` method.
        size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 384}`):
            Resolution of the output image after `resize` is applied. If `size["shortest_edge"]` >= 384, the image is
            resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will
            be matched to `int(size["shortest_edge"]/crop_pct)`, after which the image is cropped to
            `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. Can
            be overridden by `size` in the `preprocess` method.
        crop_pct (`float` *optional*, defaults to 224 / 256):
            Percentage of the image to crop. Only has an effect if `do_resize` is `True` and size < 384. Can be
            overridden by `crop_pct` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
    """
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, crop_pct: Optional[float]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: dict[str, int], crop_pct: float, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        """
        Resize an image.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If
                `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`.
                Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`,
                after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`.
            crop_pct (`float`):
                Percentage of the image to crop. Only has an effect if size < 384.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred from the input
                image.
        """
        size = get_size_dict(size, default_to_square=False)
        if 'shortest_edge' not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, input_data_format=input_data_format, **kwargs)
        else:
            return resize(image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)

    @filter_out_non_signature_kwargs()
    def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, crop_pct: Optional[float]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image
                is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the
                image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to
                `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`.
            crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
                Percentage of the image to crop if size < 384.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of `PILImageResampling` filters. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_flat_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
        validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
        images = [to_numpy_array(image) for image in images]
        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(images[0])
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample, input_data_format=input_data_format) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]
        images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
@requires(backends=('vision',))
class ConvNextImageProcessor(BaseImageProcessor):
    '''
    Constructs a ConvNeXT image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
            overridden by `do_resize` in the `preprocess` method.
        size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 384}`):
            Resolution of the output image after `resize` is applied. If `size["shortest_edge"]` >= 384, the image is
            resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the image will
            be matched to `int(size["shortest_edge"]/crop_pct)`, after which the image is cropped to
            `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`. Can
            be overridden by `size` in the `preprocess` method.
        crop_pct (`float` *optional*, defaults to 224 / 256):
            Percentage of the image to crop. Only has an effect if `do_resize` is `True` and size < 384. Can be
            overridden by `crop_pct` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
    '''

    def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, crop_pct: Optional[float]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
        pass

    def resize(self, image: np.ndarray, size: dict[str, int], crop_pct: float, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        '''
        Resize an image.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If
                `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`.
                Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`,
                after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`.
            crop_pct (`float`):
                Percentage of the image to crop. Only has an effect if size < 384.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred from the input
                image.
        '''
        pass

    @filter_out_non_signature_kwargs()
    def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, crop_pct: Optional[float]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
        '''
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image
                is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the
                image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to
                `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`.
            crop_pct (`float`, *optional*, defaults to `self.crop_pct`):
                Percentage of the image to crop if size < 384.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of `PILImageResampling` filters. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        '''
        pass
6
3
76
5
47
24
8
0.74
1
8
2
1
3
9
3
23
270
21
143
55
102
106
55
18
51
16
3
1
24
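The `resize` logic above upsamples by `1/crop_pct` on the shortest edge and then center-crops whenever the target is below the 384 threshold. A pure-function restatement of that size arithmetic (the function name is invented for illustration):

```python
def resize_plan(shortest_edge, crop_pct=224 / 256):
    # Mirrors ConvNextImageProcessor.resize: below 384, enlarge then crop;
    # at or above 384, resize straight to a square
    if shortest_edge < 384:
        return {"resize_shortest_edge": int(shortest_edge / crop_pct), "center_crop": shortest_edge}
    return {"resize_to": (shortest_edge, shortest_edge)}

print(resize_plan(224))  # {'resize_shortest_edge': 256, 'center_crop': 224}
print(resize_plan(384))  # {'resize_to': (384, 384)}
```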
1,387
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/image_processing_convnext_fast.py
transformers.models.convnext.image_processing_convnext_fast.ConvNextImageProcessorFast
from ...image_processing_utils_fast import BaseImageProcessorFast, DefaultFastImageProcessorKwargs, group_images_by_shape, reorder_images
from ...utils import TensorType, auto_docstring, is_torchvision_v2_available
import torch
from ...image_processing_utils import BatchFeature
from ...image_transforms import get_resize_output_image_size
from typing import Optional, Union
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling
from ...processing_utils import Unpack


@auto_docstring
class ConvNextImageProcessorFast(BaseImageProcessorFast):
    resample = PILImageResampling.BILINEAR
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    size = {'shortest_edge': 384}
    default_to_square = False
    do_resize = True
    do_rescale = True
    do_normalize = True
    crop_pct = 224 / 256
    valid_kwargs = ConvNextFastImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[ConvNextFastImageProcessorKwargs]):
        super().__init__(**kwargs)

    @auto_docstring
    def preprocess(self, images: ImageInput, **kwargs: Unpack[ConvNextFastImageProcessorKwargs]) -> BatchFeature:
        return super().preprocess(images, **kwargs)

    def resize(self, image: 'torch.Tensor', size: dict[str, int], crop_pct: float, interpolation: PILImageResampling=PILImageResampling.BICUBIC, **kwargs) -> 'torch.Tensor':
        """
        Resize an image.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`dict[str, int]`):
                Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If
                `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`.
                Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`,
                after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`.
            crop_pct (`float`):
                Percentage of the image to crop. Only has an effect if size < 384.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.

        Returns:
            `torch.Tensor`: Resized image.
        """
        if not size.shortest_edge:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False, input_data_format=ChannelDimension.FIRST)
            image = F.resize(image, resize_size, interpolation=interpolation, **kwargs)
            return F.center_crop(image, (shortest_edge, shortest_edge), **kwargs)
        else:
            return F.resize(image, (shortest_edge, shortest_edge), interpolation=interpolation, **kwargs)

    def _preprocess(self, images: list['torch.Tensor'], do_resize: bool, size: dict[str, int], crop_pct: float, interpolation: Optional['F.InterpolationMode'], do_center_crop: bool, crop_size: int, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs) -> BatchFeature:
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(image=stacked_images, size=size, crop_pct=crop_pct, interpolation=interpolation)
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_center_crop:
                stacked_images = self.center_crop(stacked_images, crop_size)
            stacked_images = self.rescale_and_normalize(stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std)
            processed_images_grouped[shape] = stacked_images
        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
        return BatchFeature(data={'pixel_values': processed_images}, tensor_type=return_tensors)
@auto_docstring
class ConvNextImageProcessorFast(BaseImageProcessorFast):
    def __init__(self, **kwargs: Unpack[ConvNextFastImageProcessorKwargs]):
        pass

    @auto_docstring
    def preprocess(self, images: ImageInput, **kwargs: Unpack[ConvNextFastImageProcessorKwargs]) -> BatchFeature:
        pass

    def resize(self, image: 'torch.Tensor', size: dict[str, int], crop_pct: float, interpolation: PILImageResampling=PILImageResampling.BICUBIC, **kwargs) -> 'torch.Tensor':
        '''
        Resize an image.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`dict[str, int]`):
                Dictionary of the form `{"shortest_edge": int}`, specifying the size of the output image. If
                `size["shortest_edge"]` >= 384 image is resized to `(size["shortest_edge"], size["shortest_edge"])`.
                Otherwise, the smaller edge of the image will be matched to `int(size["shortest_edge"] / crop_pct)`,
                after which the image is cropped to `(size["shortest_edge"], size["shortest_edge"])`.
            crop_pct (`float`):
                Percentage of the image to crop. Only has an effect if size < 384.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.

        Returns:
            `torch.Tensor`: Resized image.
        '''
        pass

    def _preprocess(self, images: list['torch.Tensor'], do_resize: bool, size: dict[str, int], crop_pct: float, interpolation: Optional['F.InterpolationMode'], do_center_crop: bool, crop_size: int, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs) -> BatchFeature:
        pass
7
1
27
2
19
6
3
0.25
1
10
4
0
4
0
4
38
131
10
97
57
60
24
44
25
39
6
4
2
11
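The `resize` logic in the `ConvNextImageProcessorFast` record above implements the ConvNeXt evaluation transform: below 384 pixels the shorter side is first scaled up by `1 / crop_pct` and the result is center-cropped back down, while at 384 and above the image is resized to a square directly. A minimal sketch of that arithmetic (the helper name is hypothetical, 0.875 is the conventional ImageNet crop percentage used only for illustration, and the real method delegates the actual resizing to torchvision):

```python
def eval_resize_plan(shortest_edge: int, crop_pct: float) -> tuple[int, int]:
    """Return (resize_target, final_size) for the ConvNeXt eval transform."""
    if shortest_edge < 384:
        # Scale the shorter side up first, then center-crop back down.
        return int(shortest_edge / crop_pct), shortest_edge
    # >= 384: resize straight to a square, no crop.
    return shortest_edge, shortest_edge

print(eval_resize_plan(224, 0.875))  # (256, 224): resize to 256, crop to 224
print(eval_resize_plan(384, 0.875))  # (384, 384): direct square resize
```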
1,388
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/modeling_convnext.py
transformers.models.convnext.modeling_convnext.ConvNextBackbone
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
import torch
from ...utils.backbone_utils import BackboneMixin
from ...utils.generic import can_return_tuple
from typing import Optional
from ...utils import auto_docstring, logging
from torch import nn

@auto_docstring(custom_intro='\n    ConvNeXt backbone, to be used with frameworks like DETR and MaskFormer.\n    ')
class ConvNextBackbone(ConvNextPreTrainedModel, BackboneMixin):
    has_attentions = False

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.embeddings = ConvNextEmbeddings(config)
        self.encoder = ConvNextEncoder(config)
        self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
        hidden_states_norms = {}
        for stage, num_channels in zip(self._out_features, self.channels):
            hidden_states_norms[stage] = ConvNextLayerNorm(num_channels, data_format='channels_first')
        self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None) -> BackboneOutput:
        """
        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
        >>> model = AutoBackbone.from_pretrained("facebook/convnext-tiny-224")

        >>> inputs = processor(image, return_tensors="pt")

        >>> outputs = model(**inputs)
        ```"""
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        embedding_output = self.embeddings(pixel_values)
        outputs: BaseModelOutputWithNoAttention = self.encoder(embedding_output, output_hidden_states=True)
        hidden_states = outputs.hidden_states
        feature_maps = []
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                hidden_state = self.hidden_states_norms[stage](hidden_state)
                feature_maps.append(hidden_state)
        return BackboneOutput(feature_maps=tuple(feature_maps), hidden_states=hidden_states if output_hidden_states else None)
@auto_docstring(custom_intro='\n ConvNeXt backbone, to be used with frameworks like DETR and MaskFormer.\n ') class ConvNextBackbone(ConvNextPreTrainedModel, BackboneMixin): def __init__(self, config): pass @can_return_tuple @auto_docstring def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None) -> BackboneOutput: ''' Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") >>> model = AutoBackbone.from_pretrained("facebook/convnext-tiny-224") >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) ```''' pass
6
1
37
7
22
9
6
0.37
2
8
4
0
2
4
2
15
78
15
46
21
36
17
29
15
26
9
2
2
11
1,389
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/modeling_convnext.py
transformers.models.convnext.modeling_convnext.ConvNextDropPath
from typing import Optional from torch import nn import torch class ConvNextDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float]=None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f'p={self.drop_prob}'
class ConvNextDropPath(nn.Module): '''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).''' def __init__(self, drop_prob: Optional[float]=None) -> None: pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass def extra_repr(self) -> str: pass
4
1
2
0
2
0
1
0.13
1
4
0
0
3
1
3
13
12
3
8
5
4
1
8
5
4
1
1
0
3
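`ConvNextDropPath` above calls a module-level `drop_path` helper that is not part of the extracted class. A minimal sketch of the standard stochastic-depth formulation it refers to (the actual helper in `modeling_convnext.py` may differ in minor details):

```python
import torch

def drop_path(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Stochastic depth: zero out entire samples in the residual branch and
    # rescale the survivors so the expected activation stays unchanged.
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = x.new_empty(shape).bernoulli_(keep_prob)
    return x * mask / keep_prob

x = torch.ones(4, 3, 2, 2)
out = drop_path(x, drop_prob=0.5, training=True)
# Each sample in `out` is now either all zeros or scaled by 1 / keep_prob = 2.0.
```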
1,390
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/modeling_convnext.py
transformers.models.convnext.modeling_convnext.ConvNextEmbeddings
from torch import nn
import torch

class ConvNextEmbeddings(nn.Module):
    """This class is comparable to (and inspired by) the SwinEmbeddings class found in
    src/transformers/models/swin/modeling_swin.py.
    """

    def __init__(self, config):
        super().__init__()
        self.patch_embeddings = nn.Conv2d(config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size)
        self.layernorm = ConvNextLayerNorm(config.hidden_sizes[0], eps=1e-06, data_format='channels_first')
        self.num_channels = config.num_channels

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError('Make sure that the channel dimension of the pixel values matches the one set in the configuration.')
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.layernorm(embeddings)
        return embeddings
class ConvNextEmbeddings(nn.Module): '''This class is comparable to (and inspired by) the SwinEmbeddings class found in src/transformers/models/swin/modeling_swin.py. ''' def __init__(self, config): pass def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: pass
3
1
8
0
8
0
2
0.18
1
4
1
0
2
3
2
12
22
2
17
8
14
3
13
8
10
2
1
1
3
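The patch embedding in the `ConvNextEmbeddings` record above is a convolution whose kernel size and stride both equal `patch_size`, so it tiles the image into non-overlapping patches. A quick shape check, assuming the convnext-tiny defaults (3 input channels, first hidden size 96, patch size 4):

```python
import torch
from torch import nn

# Kernel == stride == patch_size means non-overlapping patches:
# a 224x224 input becomes a 56x56 grid of 96-dimensional embeddings.
patch_embeddings = nn.Conv2d(3, 96, kernel_size=4, stride=4)
pixel_values = torch.randn(1, 3, 224, 224)
print(patch_embeddings(pixel_values).shape)  # torch.Size([1, 96, 56, 56])
```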
1,391
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/modeling_convnext.py
transformers.models.convnext.modeling_convnext.ConvNextEncoder
import torch from torch import nn from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from typing import Optional class ConvNextEncoder(nn.Module): def __init__(self, config): super().__init__() self.stages = nn.ModuleList() drop_path_rates = [x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu').split(config.depths)] prev_chs = config.hidden_sizes[0] for i in range(config.num_stages): out_chs = config.hidden_sizes[i] stage = ConvNextStage(config, in_channels=prev_chs, out_channels=out_chs, stride=2 if i > 0 else 1, depth=config.depths[i], drop_path_rates=drop_path_rates[i]) self.stages.append(stage) prev_chs = out_chs def forward(self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool]=False) -> BaseModelOutputWithNoAttention: all_hidden_states = [hidden_states] if output_hidden_states else None for layer_module in self.stages: hidden_states = layer_module(hidden_states) if all_hidden_states is not None: all_hidden_states.append(hidden_states) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class ConvNextEncoder(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, output_hidden_states: Optional[bool]=False) -> BaseModelOutputWithNoAttention: pass
3
0
22
3
19
0
5
0
1
7
2
0
2
1
2
12
45
6
39
16
31
0
22
11
19
6
1
2
9
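The one-liner computing `drop_path_rates` in `ConvNextEncoder.__init__` builds a linear stochastic-depth schedule over all blocks, then splits it back into per-stage lists. A sketch of what it produces, assuming the tiny depths and a nonzero rate (the config default is 0.0):

```python
import torch

depths = [3, 3, 9, 3]   # blocks per stage (convnext-tiny)
drop_path_rate = 0.1    # assumed here for illustration

# Rates rise linearly from 0 to drop_path_rate across all 18 blocks.
rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
print([len(r) for r in rates])                         # [3, 3, 9, 3] -- one rate per block
print(round(rates[0][0], 3), round(rates[-1][-1], 3))  # 0.0 0.1 -- deeper blocks drop more often
```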
1,392
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/modeling_convnext.py
transformers.models.convnext.modeling_convnext.ConvNextForImageClassification
from torch import nn from ...utils.generic import can_return_tuple from ...utils import auto_docstring, logging from typing import Optional from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention import torch @auto_docstring(custom_intro='\n ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ') class ConvNextForImageClassification(ConvNextPreTrainedModel): accepts_loss_kwargs = False def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.convnext = ConvNextModel(config) if config.num_labels > 0: self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels) else: self.classifier = nn.Identity() self.post_init() @can_return_tuple @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs) -> ImageClassifierOutputWithNoAttention: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs: BaseModelOutputWithPoolingAndNoAttention = self.convnext(pixel_values, **kwargs) pooled_output = outputs.pooler_output logits = self.classifier(pooled_output) loss = None if labels is not None: loss = self.loss_function(labels=labels, pooled_logits=logits, config=self.config) return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@auto_docstring(custom_intro='\n ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ') class ConvNextForImageClassification(ConvNextPreTrainedModel): def __init__(self, config): pass @can_return_tuple @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs) -> ImageClassifierOutputWithNoAttention: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
6
1
33
5
24
4
8
0.14
1
5
2
0
2
3
2
3
74
10
56
19
40
8
32
12
29
13
2
3
15
1,393
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/modeling_convnext.py
transformers.models.convnext.modeling_convnext.ConvNextLayer
import torch
from ...activations import ACT2FN
from torch import nn

class ConvNextLayer(nn.Module):
    """This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), 1x1 Conv, GELU, 1x1 Conv]; all in (N, C, H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back

    The authors used (2) as they find it slightly faster in PyTorch.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    """

    def __init__(self, config, dim, drop_path=0):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
        self.layernorm = ConvNextLayerNorm(dim, eps=1e-06)
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = ACT2FN[config.hidden_act]
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.layer_scale_parameter = nn.Parameter(config.layer_scale_init_value * torch.ones(dim), requires_grad=True) if config.layer_scale_init_value > 0 else None
        self.drop_path = ConvNextDropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        residual = features
        features = self.dwconv(features)
        features = features.permute(0, 2, 3, 1)
        features = self.layernorm(features)
        features = self.pwconv1(features)
        features = self.act(features)
        features = self.pwconv2(features)
        if self.layer_scale_parameter is not None:
            features = self.layer_scale_parameter * features
        features = features.permute(0, 3, 1, 2)
        features = residual + self.drop_path(features)
        return features
class ConvNextLayer(nn.Module):
    '''This corresponds to the `Block` class in the original implementation.

    There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), 1x1 Conv, GELU, 1x1 Conv]; all in (N, C, H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back

    The authors used (2) as they find it slightly faster in PyTorch.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    '''

    def __init__(self, config, dim, drop_path=0):
        pass

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        pass
3
1
14
1
13
2
3
0.48
1
4
2
0
2
7
2
12
42
6
27
11
24
13
23
11
20
3
1
1
5
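The docstring of `ConvNextLayer` states that the channels-first convolutional form and the permuted channels-last linear form are equivalent; the layer uses the second because the authors found it slightly faster in PyTorch. A small check of that equivalence for the pointwise (1x1) step, with illustrative sizes:

```python
import torch
from torch import nn

dim = 8
conv = nn.Conv2d(dim, 4 * dim, kernel_size=1)  # pointwise conv in (N, C, H, W)
linear = nn.Linear(dim, 4 * dim)               # same map in (N, H, W, C)

# Share weights: a 1x1 conv kernel is just an (out, in) matrix.
with torch.no_grad():
    linear.weight.copy_(conv.weight.view(4 * dim, dim))
    linear.bias.copy_(conv.bias)

x = torch.randn(2, dim, 5, 5)
out_conv = conv(x).permute(0, 2, 3, 1)
out_linear = linear(x.permute(0, 2, 3, 1))
print(torch.allclose(out_conv, out_linear, atol=1e-5))  # True
```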
1,394
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/modeling_convnext.py
transformers.models.convnext.modeling_convnext.ConvNextLayerNorm
from torch import nn
import torch

class ConvNextLayerNorm(nn.LayerNorm):
    """LayerNorm that supports two data formats for the ordering of the dimensions in the inputs: channels_last (default) or channels_first. channels_last corresponds to inputs with shape (batch_size, height, width, channels), while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
    """

    def __init__(self, normalized_shape, *, eps=1e-06, data_format='channels_last', **kwargs):
        super().__init__(normalized_shape, eps=eps, **kwargs)
        if data_format not in ['channels_last', 'channels_first']:
            raise NotImplementedError(f'Unsupported data format: {data_format}')
        self.data_format = data_format

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """
        Args:
            features: Tensor of shape (batch_size, channels, height, width) OR (batch_size, height, width, channels)
        """
        if self.data_format == 'channels_first':
            features = features.permute(0, 2, 3, 1)
            features = super().forward(features)
            features = features.permute(0, 3, 1, 2)
        else:
            features = super().forward(features)
        return features
class ConvNextLayerNorm(nn.LayerNorm):
    '''LayerNorm that supports two data formats for the ordering of the dimensions in the inputs: channels_last (default) or channels_first. channels_last corresponds to inputs with shape (batch_size, height, width, channels), while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
    '''

    def __init__(self, normalized_shape, *, eps=1e-06, data_format='channels_last', **kwargs):
        pass

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        '''
        Args:
            features: Tensor of shape (batch_size, channels, height, width) OR (batch_size, height, width, channels)
        '''
        pass
3
2
11
0
11
0
3
0.18
1
3
0
0
2
5
2
12
28
2
22
11
19
4
21
11
18
3
1
1
5
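The `channels_first` branch of `ConvNextLayerNorm.forward` above is exactly a permute to channels_last, a stock LayerNorm, and a permute back. A short sanity check that this normalizes over the channel axis of an `(N, C, H, W)` tensor:

```python
import torch
from torch import nn

torch.manual_seed(0)
x = torch.randn(2, 16, 7, 7)    # (batch, channels, height, width)
ln = nn.LayerNorm(16, eps=1e-6)

out = ln(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
print(out.shape)                           # torch.Size([2, 16, 7, 7])
print(out.mean(dim=1).abs().max() < 1e-5)  # tensor(True): zero channel mean at every pixel
```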
1,395
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/modeling_convnext.py
transformers.models.convnext.modeling_convnext.ConvNextModel
from torch import nn from ...utils.generic import can_return_tuple from ...utils import auto_docstring, logging import torch from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from typing import Optional @auto_docstring class ConvNextModel(ConvNextPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = ConvNextEmbeddings(config) self.encoder = ConvNextEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps) self.post_init() @can_return_tuple @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutputWithPoolingAndNoAttention: if output_hidden_states is None: output_hidden_states = self.config.output_hidden_states if pixel_values is None: raise ValueError('You have to specify pixel_values') embedding_output = self.embeddings(pixel_values) encoder_outputs: BaseModelOutputWithNoAttention = self.encoder(embedding_output, output_hidden_states=output_hidden_states) last_hidden_state = encoder_outputs.last_hidden_state pooled_output = self.layernorm(last_hidden_state.mean([-2, -1])) return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
@auto_docstring class ConvNextModel(ConvNextPreTrainedModel): def __init__(self, config): pass @can_return_tuple @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutputWithPoolingAndNoAttention: pass
6
0
24
5
17
2
3
0.07
1
6
3
0
2
4
2
3
57
11
43
17
27
3
20
11
17
5
2
1
6
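ConvNeXt has no CLS token, so the `pooler_output` in `ConvNextModel.forward` is simply the spatial mean of the final feature map, followed by a LayerNorm. Shapes below assume the tiny config, whose last hidden size is 768:

```python
import torch

# Final feature map of convnext-tiny for a 224x224 input.
last_hidden_state = torch.randn(2, 768, 7, 7)

# Global average pool over height and width, as in the forward above;
# the model then applies self.layernorm to this (2, 768) tensor.
pooled_output = last_hidden_state.mean([-2, -1])
print(pooled_output.shape)  # torch.Size([2, 768])
```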
1,396
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/modeling_convnext.py
transformers.models.convnext.modeling_convnext.ConvNextPreTrainedModel
from .configuration_convnext import ConvNextConfig from ...utils import auto_docstring, logging from torch import nn from ...modeling_utils import PreTrainedModel @auto_docstring class ConvNextPreTrainedModel(PreTrainedModel): config: ConvNextConfig base_model_prefix = 'convnext' main_input_name = 'pixel_values' _no_split_modules = ['ConvNextLayer'] _can_record_outputs = {} def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, ConvNextLayerNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, ConvNextLayer): if module.layer_scale_parameter is not None: module.layer_scale_parameter.data.fill_(self.config.layer_scale_init_value)
@auto_docstring class ConvNextPreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights''' pass
3
1
11
0
8
3
4
0.54
1
0
0
3
1
0
1
1
22
2
13
6
11
7
12
6
10
4
1
2
4
1,397
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnext/modeling_convnext.py
transformers.models.convnext.modeling_convnext.ConvNextStage
import torch from torch import nn class ConvNextStage(nn.Module): """ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks. Args: config ([`ConvNextConfig`]): Model configuration class. in_channels (`int`): Number of input channels. out_channels (`int`): Number of output channels. depth (`int`): Number of residual blocks. drop_path_rates(`list[float]`): Stochastic depth rates for each layer. """ def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None): super().__init__() if in_channels != out_channels or stride > 1: self.downsampling_layer = nn.ModuleList([ConvNextLayerNorm(in_channels, eps=1e-06, data_format='channels_first'), nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride)]) else: self.downsampling_layer = nn.ModuleList() drop_path_rates = drop_path_rates or [0.0] * depth self.layers = nn.ModuleList([ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]) def forward(self, features: torch.Tensor) -> torch.Tensor: for layer in self.downsampling_layer: features = layer(features) for layer in self.layers: features = layer(features) return features
class ConvNextStage(nn.Module): '''ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks. Args: config ([`ConvNextConfig`]): Model configuration class. in_channels (`int`): Number of input channels. out_channels (`int`): Number of output channels. depth (`int`): Number of residual blocks. drop_path_rates(`list[float]`): Stochastic depth rates for each layer. ''' def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None): pass def forward(self, features: torch.Tensor) -> torch.Tensor: pass
3
1
9
1
9
0
2
0.44
1
5
2
0
2
2
2
12
30
4
18
5
15
8
12
5
9
2
1
1
3
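`ConvNextStage` only inserts the LayerNorm + strided-conv downsampling pair when the channel count changes or `stride > 1`; otherwise `downsampling_layer` stays an empty `ModuleList` and the loop over it is a no-op. A sketch of that condition, with channel values taken from the default `hidden_sizes` for illustration:

```python
def needs_downsampling(in_channels: int, out_channels: int, stride: int) -> bool:
    # Mirrors the condition in ConvNextStage.__init__ above.
    return in_channels != out_channels or stride > 1

print(needs_downsampling(96, 96, 1))   # False: stage 1 keeps the stem resolution
print(needs_downsampling(96, 192, 2))  # True: stages 2-4 halve height and width
```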
1,398
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnextv2/configuration_convnextv2.py
transformers.models.convnextv2.configuration_convnextv2.ConvNextV2Config
from ...configuration_utils import PretrainedConfig
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`ConvNextV2Model`]. It is used to instantiate a
    ConvNeXTV2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the ConvNeXTV2
    [facebook/convnextv2-tiny-1k-224](https://huggingface.co/facebook/convnextv2-tiny-1k-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        patch_size (`int`, *optional*, defaults to 4):
            Patch size to use in the patch embedding layer.
        num_stages (`int`, *optional*, defaults to 4):
            The number of stages in the model.
        hidden_sizes (`list[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
            Dimensionality (hidden size) at each stage.
        depths (`list[int]`, *optional*, defaults to `[3, 3, 9, 3]`):
            Depth (number of blocks) for each stage.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop rate for stochastic depth.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        out_features (`list[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`list[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined
            in the `stage_names` attribute.

    Example:
    ```python
    >>> from transformers import ConvNextV2Config, ConvNextV2Model

    >>> # Initializing a ConvNeXTV2 convnextv2-tiny-1k-224 style configuration
    >>> configuration = ConvNextV2Config()

    >>> # Initializing a model (with random weights) from the convnextv2-tiny-1k-224 style configuration
    >>> model = ConvNextV2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'convnextv2'

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    '''
    This is the configuration class to store the configuration of a [`ConvNextV2Model`]. It is used to instantiate a
    ConvNeXTV2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the ConvNeXTV2
    [facebook/convnextv2-tiny-1k-224](https://huggingface.co/facebook/convnextv2-tiny-1k-224) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        patch_size (`int`, *optional*, defaults to 4):
            Patch size to use in the patch embedding layer.
        num_stages (`int`, *optional*, defaults to 4):
            The number of stages in the model.
        hidden_sizes (`list[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
            Dimensionality (hidden size) at each stage.
        depths (`list[int]`, *optional*, defaults to `[3, 3, 9, 3]`):
            Depth (number of blocks) for each stage.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop rate for stochastic depth.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        out_features (`list[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`list[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined
            in the `stage_names` attribute.

    Example:
    ```python
    >>> from transformers import ConvNextV2Config, ConvNextV2Model

    >>> # Initializing a ConvNeXTV2 convnextv2-tiny-1k-224 style configuration
    >>> configuration = ConvNextV2Config()

    >>> # Initializing a model (with random weights) from the convnextv2-tiny-1k-224 style configuration
    >>> model = ConvNextV2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        pass
2
1
32
1
31
0
3
1.48
2
2
0
0
1
13
1
6
91
9
33
30
16
49
16
15
14
3
1
0
3
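The `stage_names` list built in `ConvNextV2Config.__init__` is what the `out_features`/`out_indices` backbone arguments are validated against. For the default depths it comes out as:

```python
depths = [3, 3, 9, 3]  # default for convnextv2-tiny
stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
print(stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
```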
1,399
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/convnextv2/modeling_convnextv2.py
transformers.models.convnextv2.modeling_convnextv2.ConvNextV2Backbone
from ...utils import auto_docstring, logging
from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...utils.backbone_utils import BackboneMixin
import torch
from typing import Optional
from torch import nn
from ...utils.generic import can_return_tuple

@auto_docstring(custom_intro='\n    ConvNeXT V2 backbone, to be used with frameworks like DETR and MaskFormer.\n    ')
class ConvNextV2Backbone(ConvNextV2PreTrainedModel, BackboneMixin):
    has_attentions = False

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.embeddings = ConvNextV2Embeddings(config)
        self.encoder = ConvNextV2Encoder(config)
        self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
        hidden_states_norms = {}
        for stage, num_channels in zip(self._out_features, self.channels):
            hidden_states_norms[stage] = ConvNextV2LayerNorm(num_channels, data_format='channels_first')
        self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None) -> BackboneOutput:
        """
        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
        >>> model = AutoBackbone.from_pretrained("facebook/convnextv2-tiny-1k-224")

        >>> inputs = processor(image, return_tensors="pt")

        >>> outputs = model(**inputs)
        ```"""
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        embedding_output = self.embeddings(pixel_values)
        outputs: BaseModelOutputWithNoAttention = self.encoder(embedding_output, output_hidden_states=True)
        hidden_states = outputs.hidden_states
        feature_maps = []
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                hidden_state = self.hidden_states_norms[stage](hidden_state)
                feature_maps.append(hidden_state)
        return BackboneOutput(feature_maps=tuple(feature_maps), hidden_states=hidden_states if output_hidden_states else None)
@auto_docstring(custom_intro='\n ConvNeXT V2 backbone, to be used with frameworks like DETR and MaskFormer.\n ') class ConvNextV2Backbone(ConvNextV2PreTrainedModel, BackboneMixin): def __init__(self, config): pass @can_return_tuple @auto_docstring def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None) -> BackboneOutput: ''' Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") >>> model = AutoBackbone.from_pretrained("facebook/convnextv2-tiny-1k-224") >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) ```''' pass
6
1
37
7
22
9
6
0.37
2
8
4
0
2
4
2
15
78
15
46
21
36
17
29
15
26
9
2
2
11