Dataset columns (dtype and value range; for string columns, Min/Max are value lengths in characters):

Column                      Dtype    Min   Max
id                          int64    0     328k
repository_name             string   7     58
file_path                   string   9     302
class_name                  string   5     256
human_written_code          string   16    2.16M
class_skeleton              string   18    1.49M
total_program_units         int64    1     1.76k
total_doc_str               int64    0     771
AvgCountLine                float64  0     7.89k
AvgCountLineBlank           float64  0     297
AvgCountLineCode            float64  0     7.89k
AvgCountLineComment         float64  0     7.89k
AvgCyclomatic               float64  0     130
CommentToCodeRatio          float64  0     168
CountClassBase              float64  0     40
CountClassCoupled           float64  0     583
CountClassCoupledModified   float64  0     575
CountClassDerived           float64  0     5.35k
CountDeclInstanceMethod     float64  0     529
CountDeclInstanceVariable   float64  0     296
CountDeclMethod             float64  0     599
CountDeclMethodAll          float64  0     1.12k
CountLine                   float64  1     40.4k
CountLineBlank              float64  0     8.16k
CountLineCode               float64  1     25.7k
CountLineCodeDecl           float64  1     8.15k
CountLineCodeExe            float64  0     24.2k
CountLineComment            float64  0     16.5k
CountStmt                   float64  1     9.71k
CountStmtDecl               float64  1     8.15k
CountStmtExe                float64  0     9.69k
MaxCyclomatic               float64  0     759
MaxInheritanceTree          float64  0     16
MaxNesting                  float64  0     34
SumCyclomatic               float64  0     2.9k

Each record below gives id, repository_name, file_path and class_name, then the verbatim human_written_code and class_skeleton fields, followed by the metric values.
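Since the schema maps onto ordinary Hugging Face `datasets` column types, a split with these columns can be loaded and filtered on the metric fields like any other dataset. A minimal sketch, assuming the standard `datasets` API; the dataset identifier "org/canine-class-metrics" is a placeholder, not the real repository name:

# Sketch only: load a dataset with the columns listed above and filter on its metrics.
# "org/canine-class-metrics" is a placeholder dataset id.
from datasets import load_dataset

ds = load_dataset("org/canine-class-metrics", split="train")

# Keep small, documented classes: fewer than 200 code lines and at least one docstring.
subset = ds.filter(lambda row: row["CountLineCode"] < 200 and row["total_doc_str"] >= 1)

for row in subset.select(range(3)):
    print(row["id"], row["class_name"], row["MaxCyclomatic"], row["CommentToCodeRatio"])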
id: 1,100
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineAttention
from typing import Optional, Union from torch import nn from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer import torch class CanineAttention(nn.Module): """ Additional arguments related to local attention: - **local** (`bool`, *optional*, defaults to `False`) -- Whether to apply local attention. - **always_attend_to_first_position** (`bool`, *optional*, defaults to `False`) -- Should all blocks be able to attend to the `to_tensor`'s first position (e.g. a [CLS] position)? - **first_position_attends_to_all** (`bool`, *optional*, defaults to `False`) -- Should the *from_tensor*'s first position be able to attend to all positions within the *from_tensor*? - **attend_from_chunk_width** (`int`, *optional*, defaults to 128) -- The width of each block-wise chunk in `from_tensor`. - **attend_from_chunk_stride** (`int`, *optional*, defaults to 128) -- The number of elements to skip when moving to the next block in `from_tensor`. - **attend_to_chunk_width** (`int`, *optional*, defaults to 128) -- The width of each block-wise chunk in *to_tensor*. - **attend_to_chunk_stride** (`int`, *optional*, defaults to 128) -- The number of elements to skip when moving to the next block in `to_tensor`. """ def __init__(self, config, local=False, always_attend_to_first_position: bool=False, first_position_attends_to_all: bool=False, attend_from_chunk_width: int=128, attend_from_chunk_stride: int=128, attend_to_chunk_width: int=128, attend_to_chunk_stride: int=128): super().__init__() self.self = CanineSelfAttention(config) self.output = CanineSelfOutput(config) self.pruned_heads = set() self.local = local if attend_from_chunk_width < attend_from_chunk_stride: raise ValueError('`attend_from_chunk_width` < `attend_from_chunk_stride` would cause sequence positions to get skipped.') if attend_to_chunk_width < attend_to_chunk_stride: raise ValueError('`attend_to_chunk_width` < `attend_to_chunk_stride`would cause sequence positions to get skipped.') self.always_attend_to_first_position = always_attend_to_first_position self.first_position_attends_to_all = first_position_attends_to_all self.attend_from_chunk_width = attend_from_chunk_width self.attend_from_chunk_stride = attend_from_chunk_stride self.attend_to_chunk_width = attend_to_chunk_width self.attend_to_chunk_stride = attend_to_chunk_stride def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: tuple[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: if not self.local: self_outputs = self.self(hidden_states, hidden_states, attention_mask, head_mask, output_attentions) attention_output = self_outputs[0] else: from_seq_length = to_seq_length = hidden_states.shape[1] from_tensor = to_tensor = hidden_states from_chunks = [] 
if self.first_position_attends_to_all: from_chunks.append((0, 1)) from_start = 1 else: from_start = 0 for chunk_start in range(from_start, from_seq_length, self.attend_from_chunk_stride): chunk_end = min(from_seq_length, chunk_start + self.attend_from_chunk_width) from_chunks.append((chunk_start, chunk_end)) to_chunks = [] if self.first_position_attends_to_all: to_chunks.append((0, to_seq_length)) for chunk_start in range(0, to_seq_length, self.attend_to_chunk_stride): chunk_end = min(to_seq_length, chunk_start + self.attend_to_chunk_width) to_chunks.append((chunk_start, chunk_end)) if len(from_chunks) != len(to_chunks): raise ValueError(f'Expected to have same number of `from_chunks` ({from_chunks}) and `to_chunks` ({from_chunks}). Check strides.') attention_output_chunks = [] attention_probs_chunks = [] for (from_start, from_end), (to_start, to_end) in zip(from_chunks, to_chunks): from_tensor_chunk = from_tensor[:, from_start:from_end, :] to_tensor_chunk = to_tensor[:, to_start:to_end, :] attention_mask_chunk = attention_mask[:, from_start:from_end, to_start:to_end] if self.always_attend_to_first_position: cls_attention_mask = attention_mask[:, from_start:from_end, 0:1] attention_mask_chunk = torch.cat([cls_attention_mask, attention_mask_chunk], dim=2) cls_position = to_tensor[:, 0:1, :] to_tensor_chunk = torch.cat([cls_position, to_tensor_chunk], dim=1) attention_outputs_chunk = self.self(from_tensor_chunk, to_tensor_chunk, attention_mask_chunk, head_mask, output_attentions) attention_output_chunks.append(attention_outputs_chunk[0]) if output_attentions: attention_probs_chunks.append(attention_outputs_chunk[1]) attention_output = torch.cat(attention_output_chunks, dim=1) attention_output = self.output(attention_output, hidden_states) outputs = (attention_output,) if not self.local: outputs = outputs + self_outputs[1:] else: outputs = outputs + tuple(attention_probs_chunks) return outputs
class CanineAttention(nn.Module): ''' Additional arguments related to local attention: - **local** (`bool`, *optional*, defaults to `False`) -- Whether to apply local attention. - **always_attend_to_first_position** (`bool`, *optional*, defaults to `False`) -- Should all blocks be able to attend to the `to_tensor`'s first position (e.g. a [CLS] position)? - **first_position_attends_to_all** (`bool`, *optional*, defaults to `False`) -- Should the *from_tensor*'s first position be able to attend to all positions within the *from_tensor*? - **attend_from_chunk_width** (`int`, *optional*, defaults to 128) -- The width of each block-wise chunk in `from_tensor`. - **attend_from_chunk_stride** (`int`, *optional*, defaults to 128) -- The number of elements to skip when moving to the next block in `from_tensor`. - **attend_to_chunk_width** (`int`, *optional*, defaults to 128) -- The width of each block-wise chunk in *to_tensor*. - **attend_to_chunk_stride** (`int`, *optional*, defaults to 128) -- The number of elements to skip when moving to the next block in `to_tensor`. ''' def __init__(self, config, local=False, always_attend_to_first_position: bool=False, first_position_attends_to_all: bool=False, attend_from_chunk_width: int=128, attend_from_chunk_stride: int=128, attend_to_chunk_width: int=128, attend_to_chunk_stride: int=128): pass def prune_heads(self, heads): pass def forward(self, hidden_states: tuple[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: pass
total_program_units: 4, total_doc_str: 1
AvgCountLine: 41, AvgCountLineBlank: 4, AvgCountLineCode: 34, AvgCountLineComment: 4, AvgCyclomatic: 5, CommentToCodeRatio: 0.25
CountClassBase: 1, CountClassCoupled: 10, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 10, CountDeclMethod: 3, CountDeclMethodAll: 13
CountLine: 141, CountLineBlank: 15, CountLineCode: 102, CountLineCodeDecl: 50, CountLineCodeExe: 82, CountLineComment: 26
CountStmt: 72, CountStmtDecl: 34, CountStmtExe: 68
MaxCyclomatic: 11, MaxInheritanceTree: 1, MaxNesting: 3, SumCyclomatic: 16
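The metric columns appear internally consistent: in the record above, CommentToCodeRatio (0.25) matches CountLineComment divided by CountLineCode (26 / 102 ≈ 0.255), and the same relationship holds for the other records shown here. A small sanity check of this kind, under the assumption that those two columns are the inputs to the ratio:

# Assumption: CommentToCodeRatio is CountLineComment / CountLineCode.
comment_lines = 26   # CountLineComment for the CanineAttention record above
code_lines = 102     # CountLineCode for the same record
print(round(comment_lines / code_lines, 2))  # 0.25, matching the stored CommentToCodeRatio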
id: 1,101
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineEmbeddings
import torch from typing import Optional, Union from torch import nn class CanineEmbeddings(nn.Module): """Construct the character, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.config = config shard_embedding_size = config.hidden_size // config.num_hash_functions for i in range(config.num_hash_functions): name = f'HashBucketCodepointEmbedder_{i}' setattr(self, name, nn.Embedding(config.num_hash_buckets, shard_embedding_size)) self.char_position_embeddings = nn.Embedding(config.num_hash_buckets, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') def _hash_bucket_tensors(self, input_ids, num_hashes: int, num_buckets: int): """ Converts ids to hash bucket ids via multiple hashing. Args: input_ids: The codepoints or other IDs to be hashed. num_hashes: The number of hash functions to use. num_buckets: The number of hash buckets (i.e. embeddings in each table). Returns: A list of tensors, each of which is the hash bucket IDs from one hash function. """ if num_hashes > len(_PRIMES): raise ValueError(f'`num_hashes` must be <= {len(_PRIMES)}') primes = _PRIMES[:num_hashes] result_tensors = [] for prime in primes: hashed = (input_ids + 1) * prime % num_buckets result_tensors.append(hashed) return result_tensors def _embed_hash_buckets(self, input_ids, embedding_size: int, num_hashes: int, num_buckets: int): """Converts IDs (e.g. codepoints) into embeddings via multiple hashing.""" if embedding_size % num_hashes != 0: raise ValueError(f'Expected `embedding_size` ({embedding_size}) % `num_hashes` ({num_hashes}) == 0') hash_bucket_tensors = self._hash_bucket_tensors(input_ids, num_hashes=num_hashes, num_buckets=num_buckets) embedding_shards = [] for i, hash_bucket_ids in enumerate(hash_bucket_tensors): name = f'HashBucketCodepointEmbedder_{i}' shard_embeddings = getattr(self, name)(hash_bucket_ids) embedding_shards.append(shard_embeddings) return torch.cat(embedding_shards, dim=-1) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.FloatTensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self._embed_hash_buckets(input_ids, self.config.hidden_size, self.config.num_hash_functions, self.config.num_hash_buckets) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.char_position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings
class CanineEmbeddings(nn.Module): '''Construct the character, position and token_type embeddings.''' def __init__(self, config): pass def _hash_bucket_tensors(self, input_ids, num_hashes: int, num_buckets: int): ''' Converts ids to hash bucket ids via multiple hashing. Args: input_ids: The codepoints or other IDs to be hashed. num_hashes: The number of hash functions to use. num_buckets: The number of hash buckets (i.e. embeddings in each table). Returns: A list of tensors, each of which is the hash bucket IDs from one hash function. ''' pass def _embed_hash_buckets(self, input_ids, embedding_size: int, num_hashes: int, num_buckets: int): '''Converts IDs (e.g. codepoints) into embeddings via multiple hashing.''' pass def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.FloatTensor: pass
total_program_units: 5, total_doc_str: 3
AvgCountLine: 23, AvgCountLineBlank: 4, AvgCountLineCode: 16, AvgCountLineComment: 4, AvgCyclomatic: 4, CommentToCodeRatio: 0.24
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 4, CountDeclInstanceVariable: 6, CountDeclMethod: 4, CountDeclMethodAll: 14
CountLine: 99, CountLineBlank: 21, CountLineCode: 63, CountLineCodeDecl: 34, CountLineCodeExe: 52, CountLineComment: 15
CountStmt: 52, CountStmtDecl: 28, CountStmtExe: 47
MaxCyclomatic: 6, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 14
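The class_skeleton column keeps each class's signatures and docstrings while replacing method bodies with `pass`. Purely as an illustration, and not necessarily how this dataset was actually produced, a skeleton of that shape can be derived from the full source with Python's ast module:

# Illustrative sketch: derive a class_skeleton-style stub from full source.
# Requires Python 3.9+ for ast.unparse.
import ast

def skeletonize(source: str) -> str:
    # Replace every function/method body with `pass`, keeping signatures and docstrings.
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            new_body = []
            if ast.get_docstring(node) is not None:
                new_body.append(node.body[0])  # keep the docstring statement
            new_body.append(ast.Pass())
            node.body = new_body
    return ast.unparse(tree)

# Tiny usage example on a stub class (not a dataset record):
print(skeletonize("class Pooler:\n    def forward(self, x):\n        return x"))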
id: 1,102
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineEncoder
from ...modeling_outputs import BaseModelOutput, ModelOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from typing import Optional, Union from torch import nn import torch class CanineEncoder(nn.Module): def __init__(self, config, local=False, always_attend_to_first_position=False, first_position_attends_to_all=False, attend_from_chunk_width=128, attend_from_chunk_stride=128, attend_to_chunk_width=128, attend_to_chunk_stride=128): super().__init__() self.config = config self.layer = nn.ModuleList([CanineLayer(config, local, always_attend_to_first_position, first_position_attends_to_all, attend_from_chunk_width, attend_from_chunk_stride, attend_to_chunk_width, attend_to_chunk_stride) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: tuple[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
class CanineEncoder(nn.Module): def __init__(self, config, local=False, always_attend_to_first_position=False, first_position_attends_to_all=False, attend_from_chunk_width=128, attend_from_chunk_stride=128, attend_to_chunk_width=128, attend_to_chunk_stride=128): pass def forward(self, hidden_states: tuple[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, BaseModelOutput]: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 36, AvgCountLineBlank: 3, AvgCountLineCode: 33, AvgCountLineComment: 0, AvgCyclomatic: 6, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 7, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 74, CountLineBlank: 7, CountLineCode: 67, CountLineCodeDecl: 30, CountLineCodeExe: 46, CountLineComment: 0
CountStmt: 24, CountStmtDecl: 11, CountStmtExe: 21
MaxCyclomatic: 10, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 11
id: 1,103
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineForMultipleChoice
import torch from ...utils import auto_docstring, logging from ...modeling_outputs import BaseModelOutput, ModelOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn from typing import Optional, Union from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @auto_docstring class CanineForMultipleChoice(CaninePreTrainedModel): def __init__(self, config): super().__init__(config) self.canine = CanineModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]: """ input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None outputs = self.canine(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return (loss,) + output if loss is not None else output return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class CanineForMultipleChoice(CaninePreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]: ''' input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) ''' pass
total_program_units: 5, total_doc_str: 1
AvgCountLine: 37, AvgCountLineBlank: 5, AvgCountLineCode: 29, AvgCountLineComment: 4, AvgCyclomatic: 6, CommentToCodeRatio: 0.11
CountClassBase: 1, CountClassCoupled: 4, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 3
CountLine: 82, CountLineBlank: 10, CountLineCode: 65, CountLineCodeDecl: 27, CountLineCodeExe: 44, CountLineComment: 7
CountStmt: 28, CountStmtDecl: 14, CountStmtExe: 25
MaxCyclomatic: 11, MaxInheritanceTree: 2, MaxNesting: 1, SumCyclomatic: 12
id: 1,104
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineForQuestionAnswering
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...utils import auto_docstring, logging from ...modeling_outputs import BaseModelOutput, ModelOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput @auto_docstring class CanineForQuestionAnswering(CaninePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.canine = CanineModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.canine(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class CanineForQuestionAnswering(CaninePreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]: pass
total_program_units: 5, total_doc_str: 0
AvgCountLine: 41, AvgCountLineBlank: 5, AvgCountLineCode: 30, AvgCountLineComment: 7, AvgCyclomatic: 4, CommentToCodeRatio: 0.19
CountClassBase: 1, CountClassCoupled: 4, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 3
CountLine: 92, CountLineBlank: 10, CountLineCode: 69, CountLineCodeDecl: 30, CountLineCodeExe: 45, CountLineComment: 13
CountStmt: 32, CountStmtDecl: 16, CountStmtExe: 29
MaxCyclomatic: 7, MaxInheritanceTree: 2, MaxNesting: 2, SumCyclomatic: 8
id: 1,105
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineForSequenceClassification
from ...utils import auto_docstring, logging import torch from typing import Optional, Union from ...modeling_outputs import BaseModelOutput, ModelOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @auto_docstring(custom_intro='\n CANINE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ') class CanineForSequenceClassification(CaninePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.canine = CanineModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.canine(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring(custom_intro='\n CANINE Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ') class CanineForSequenceClassification(CaninePreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass
total_program_units: 5, total_doc_str: 1
AvgCountLine: 40, AvgCountLineBlank: 4, AvgCountLineCode: 33, AvgCountLineComment: 4, AvgCyclomatic: 7, CommentToCodeRatio: 0.1
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 4, CountDeclMethod: 2, CountDeclMethodAll: 3
CountLine: 88, CountLineBlank: 9, CountLineCode: 72, CountLineCodeDecl: 26, CountLineCodeExe: 51, CountLineComment: 7
CountStmt: 34, CountStmtDecl: 13, CountStmtExe: 31
MaxCyclomatic: 12, MaxInheritanceTree: 2, MaxNesting: 3, SumCyclomatic: 13
id: 1,106
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineForTokenClassification
import torch from torch import nn from ...utils import auto_docstring, logging from typing import Optional, Union from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_outputs import BaseModelOutput, ModelOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput @auto_docstring class CanineForTokenClassification(CaninePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.canine = CanineModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. Example: ```python >>> from transformers import AutoTokenizer, CanineForTokenClassification >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/canine-s") >>> model = CanineForTokenClassification.from_pretrained("google/canine-s") >>> inputs = tokenizer( ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt" ... ) >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_token_class_ids = logits.argmax(-1) >>> # Note that tokens are classified rather then input words which means that >>> # there might be more predicted token classes than words. >>> # Multiple token classes might account for the same word >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]] >>> predicted_tokens_classes # doctest: +SKIP ``` ```python >>> labels = predicted_token_class_ids >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) # doctest: +SKIP ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.canine(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@auto_docstring class CanineForTokenClassification(CaninePreTrainedModel): def __init__(self, config): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. Example: ```python >>> from transformers import AutoTokenizer, CanineForTokenClassification >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/canine-s") >>> model = CanineForTokenClassification.from_pretrained("google/canine-s") >>> inputs = tokenizer( ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt" ... ) >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_token_class_ids = logits.argmax(-1) >>> # Note that tokens are classified rather then input words which means that >>> # there might be more predicted token classes than words. >>> # Multiple token classes might account for the same word >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]] >>> predicted_tokens_classes # doctest: +SKIP ``` ```python >>> labels = predicted_token_class_ids >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) # doctest: +SKIP ```''' pass
total_program_units: 5, total_doc_str: 1
AvgCountLine: 47, AvgCountLineBlank: 9, AvgCountLineCode: 24, AvgCountLineComment: 14, AvgCyclomatic: 3, CommentToCodeRatio: 0.55
CountClassBase: 1, CountClassCoupled: 4, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 4, CountDeclMethod: 2, CountDeclMethodAll: 3
CountLine: 97, CountLineBlank: 18, CountLineCode: 51, CountLineCodeDecl: 26, CountLineCodeExe: 34, CountLineComment: 28
CountStmt: 22, CountStmtDecl: 13, CountStmtExe: 19
MaxCyclomatic: 5, MaxInheritanceTree: 2, MaxNesting: 1, SumCyclomatic: 6
id: 1,107
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineIntermediate
import torch from torch import nn from ...activations import ACT2FN class CanineIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class CanineIntermediate(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 6, AvgCountLineBlank: 0, AvgCountLineCode: 6, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 13, CountLineBlank: 1, CountLineCode: 12, CountLineCodeDecl: 5, CountLineCodeExe: 9, CountLineComment: 0
CountStmt: 11, CountStmtDecl: 5, CountStmtExe: 8
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
id: 1,108
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineLMPredictionHead
from torch import nn import torch class CanineLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = CaninePredictionHeadTransform(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, hidden_states: tuple[torch.FloatTensor]) -> torch.FloatTensor: hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states
class CanineLMPredictionHead(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: tuple[torch.FloatTensor]) -> torch.FloatTensor: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 8, AvgCountLineBlank: 2, AvgCountLineCode: 5, AvgCountLineComment: 2, AvgCyclomatic: 1, CommentToCodeRatio: 0.27
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 1, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 18, CountLineBlank: 4, CountLineCode: 11, CountLineCodeDecl: 6, CountLineCodeExe: 8, CountLineComment: 3
CountStmt: 11, CountStmtDecl: 6, CountStmtExe: 8
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 1,109
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineLayer
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from typing import Optional, Union import torch from ...modeling_layers import GradientCheckpointingLayer class CanineLayer(GradientCheckpointingLayer): def __init__(self, config, local, always_attend_to_first_position, first_position_attends_to_all, attend_from_chunk_width, attend_from_chunk_stride, attend_to_chunk_width, attend_to_chunk_stride): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = CanineAttention(config, local, always_attend_to_first_position, first_position_attends_to_all, attend_from_chunk_width, attend_from_chunk_stride, attend_to_chunk_width, attend_to_chunk_stride) self.intermediate = CanineIntermediate(config) self.output = CanineOutput(config) def forward(self, hidden_states: tuple[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class CanineLayer(GradientCheckpointingLayer): def __init__(self, config, local, always_attend_to_first_position, first_position_attends_to_all, attend_from_chunk_width, attend_from_chunk_stride, attend_to_chunk_width, attend_to_chunk_stride): pass def forward(self, hidden_states: tuple[torch.FloatTensor], attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: pass def feed_forward_chunk(self, attention_output): pass
total_program_units: 4, total_doc_str: 0
AvgCountLine: 18, AvgCountLineBlank: 1, AvgCountLineCode: 17, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0.02
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 3, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 5, CountDeclMethod: 3, CountDeclMethodAll: 13
CountLine: 56, CountLineBlank: 5, CountLineCode: 51, CountLineCodeDecl: 31, CountLineCodeExe: 31, CountLineComment: 1
CountStmt: 19, CountStmtDecl: 15, CountStmtExe: 15
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 3
id: 1,110
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineModel
import copy import torch from typing import Optional, Union from ...utils import auto_docstring, logging @auto_docstring class CanineModel(CaninePreTrainedModel): def __init__(self, config, add_pooling_layer=True): """ add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config shallow_config = copy.deepcopy(config) shallow_config.num_hidden_layers = 1 self.char_embeddings = CanineEmbeddings(config) self.initial_char_encoder = CanineEncoder(shallow_config, local=True, always_attend_to_first_position=False, first_position_attends_to_all=False, attend_from_chunk_width=config.local_transformer_stride, attend_from_chunk_stride=config.local_transformer_stride, attend_to_chunk_width=config.local_transformer_stride, attend_to_chunk_stride=config.local_transformer_stride) self.chars_to_molecules = CharactersToMolecules(config) self.encoder = CanineEncoder(config) self.projection = ConvProjection(config) self.final_char_encoder = CanineEncoder(shallow_config) self.pooler = CaninePooler(config) if add_pooling_layer else None self.post_init() def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def _create_3d_attention_mask_from_input_mask(self, from_tensor, to_mask): """ Create 3D attention mask from a 2D tensor mask. Args: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length]. """ batch_size, from_seq_length = (from_tensor.shape[0], from_tensor.shape[1]) to_seq_length = to_mask.shape[1] to_mask = torch.reshape(to_mask, (batch_size, 1, to_seq_length)).float() broadcast_ones = torch.ones(size=(batch_size, from_seq_length, 1), dtype=torch.float32, device=to_mask.device) mask = broadcast_ones * to_mask return mask def _downsample_attention_mask(self, char_attention_mask: torch.Tensor, downsampling_rate: int): """Downsample 2D character attention mask to 2D molecule attention mask using MaxPool1d layer.""" batch_size, char_seq_len = char_attention_mask.shape poolable_char_mask = torch.reshape(char_attention_mask, (batch_size, 1, char_seq_len)) pooled_molecule_mask = torch.nn.MaxPool1d(kernel_size=downsampling_rate, stride=downsampling_rate)(poolable_char_mask.float()) molecule_attention_mask = torch.squeeze(pooled_molecule_mask, dim=-1) return molecule_attention_mask def _repeat_molecules(self, molecules: torch.Tensor, char_seq_length: int) -> torch.Tensor: """Repeats molecules to make them the same length as the char sequence.""" rate = self.config.downsampling_rate molecules_without_extra_cls = molecules[:, 1:, :] repeated = torch.repeat_interleave(molecules_without_extra_cls, repeats=rate, dim=-2) last_molecule = molecules[:, -1:, :] remainder_length = char_seq_length % rate remainder_repeated = torch.repeat_interleave(last_molecule, repeats=remainder_length + rate, dim=-2) return torch.cat([repeated, remainder_repeated], dim=-2) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, 
output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, CanineModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) molecule_attention_mask = self._downsample_attention_mask(attention_mask, downsampling_rate=self.config.downsampling_rate) extended_molecule_attention_mask: torch.Tensor = self.get_extended_attention_mask(molecule_attention_mask, (batch_size, molecule_attention_mask.shape[-1])) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) input_char_embeddings = self.char_embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) char_attention_mask = self._create_3d_attention_mask_from_input_mask(input_ids if input_ids is not None else inputs_embeds, attention_mask) init_chars_encoder_outputs = self.initial_char_encoder(input_char_embeddings, attention_mask=char_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states) input_char_encoding = init_chars_encoder_outputs.last_hidden_state init_molecule_encoding = self.chars_to_molecules(input_char_encoding) encoder_outputs = self.encoder(init_molecule_encoding, attention_mask=extended_molecule_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) molecule_sequence_output = encoder_outputs[0] pooled_output = self.pooler(molecule_sequence_output) if self.pooler is not None else None repeated_molecules = self._repeat_molecules(molecule_sequence_output, char_seq_length=input_shape[-1]) concat = torch.cat([input_char_encoding, repeated_molecules], dim=-1) sequence_output = self.projection(concat) final_chars_encoder_outputs = self.final_char_encoder(sequence_output, attention_mask=extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states) sequence_output = final_chars_encoder_outputs.last_hidden_state if output_hidden_states: deep_encoder_hidden_states = encoder_outputs.hidden_states if return_dict else encoder_outputs[1] all_hidden_states = all_hidden_states + init_chars_encoder_outputs.hidden_states + deep_encoder_hidden_states + final_chars_encoder_outputs.hidden_states if output_attentions: 
deep_encoder_self_attentions = encoder_outputs.attentions if return_dict else encoder_outputs[-1] all_self_attentions = all_self_attentions + init_chars_encoder_outputs.attentions + deep_encoder_self_attentions + final_chars_encoder_outputs.attentions if not return_dict: output = (sequence_output, pooled_output) output += tuple((v for v in [all_hidden_states, all_self_attentions] if v is not None)) return output return CanineModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=all_hidden_states, attentions=all_self_attentions)
@auto_docstring class CanineModel(CaninePreTrainedModel): def __init__(self, config, add_pooling_layer=True): ''' add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer ''' pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel ''' pass def _create_3d_attention_mask_from_input_mask(self, from_tensor, to_mask): ''' Create 3D attention mask from a 2D tensor mask. Args: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length]. ''' pass def _downsample_attention_mask(self, char_attention_mask: torch.Tensor, downsampling_rate: int): '''Downsample 2D character attention mask to 2D molecule attention mask using MaxPool1d layer.''' pass def _repeat_molecules(self, molecules: torch.Tensor, char_seq_length: int) -> torch.Tensor: '''Repeats molecules to make them the same length as the char sequence.''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, CanineModelOutputWithPooling]: pass
total_program_units: 9, total_doc_str: 5
AvgCountLine: 43, AvgCountLineBlank: 6, AvgCountLineCode: 26, AvgCountLineComment: 11, AvgCyclomatic: 4, CommentToCodeRatio: 0.41
CountClassBase: 1, CountClassCoupled: 12, CountClassCoupledModified: 6, CountClassDerived: 0
CountDeclInstanceMethod: 6, CountDeclInstanceVariable: 8, CountDeclMethod: 6, CountDeclMethodAll: 7
CountLine: 271, CountLineBlank: 40, CountLineCode: 164, CountLineCodeDecl: 66, CountLineCodeExe: 140, CountLineComment: 67
CountStmt: 84, CountStmtDecl: 54, CountStmtExe: 77
MaxCyclomatic: 19, MaxInheritanceTree: 2, MaxNesting: 1, SumCyclomatic: 26
id: 1,111
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineModelOutputWithPooling
from ...modeling_outputs import BaseModelOutput, ModelOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from ...utils import auto_docstring, logging import torch from dataclasses import dataclass from typing import Optional, Union @dataclass @auto_docstring(custom_intro='\n Output type of [`CanineModel`]. Based on [`~modeling_outputs.BaseModelOutputWithPooling`], but with slightly\n different `hidden_states` and `attentions`, as these also include the hidden states and attentions of the shallow\n Transformer encoders.\n ') class CanineModelOutputWithPooling(ModelOutput): """ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model (i.e. the output of the final shallow Transformer encoder). pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Hidden-state of the first token of the sequence (classification token) at the last layer of the deep Transformer encoder, further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the input to each encoder + one for the output of each layer of each encoder) of shape `(batch_size, sequence_length, hidden_size)` and `(batch_size, sequence_length // config.downsampling_rate, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial input to each Transformer encoder. The hidden states of the shallow encoders have length `sequence_length`, but the hidden states of the deep encoder have length `sequence_length` // `config.downsampling_rate`. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of the 3 Transformer encoders of shape `(batch_size, num_heads, sequence_length, sequence_length)` and `(batch_size, num_heads, sequence_length // config.downsampling_rate, sequence_length // config.downsampling_rate)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: Optional[torch.FloatTensor] = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass @auto_docstring(custom_intro='\n Output type of [`CanineModel`]. Based on [`~modeling_outputs.BaseModelOutputWithPooling`], but with slightly\n different `hidden_states` and `attentions`, as these also include the hidden states and attentions of the shallow\n Transformer encoders.\n ') class CanineModelOutputWithPooling(ModelOutput): ''' last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model (i.e. the output of the final shallow Transformer encoder). pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Hidden-state of the first token of the sequence (classification token) at the last layer of the deep Transformer encoder, further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the input to each encoder + one for the output of each layer of each encoder) of shape `(batch_size, sequence_length, hidden_size)` and `(batch_size, sequence_length // config.downsampling_rate, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial input to each Transformer encoder. The hidden states of the shallow encoders have length `sequence_length`, but the hidden states of the deep encoder have length `sequence_length` // `config.downsampling_rate`. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of the 3 Transformer encoders of shape `(batch_size, num_heads, sequence_length, sequence_length)` and `(batch_size, num_heads, sequence_length // config.downsampling_rate, sequence_length // config.downsampling_rate)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. ''' pass
total_program_units: 3, total_doc_str: 1
AvgCountLine: 0, AvgCountLineBlank: 0, AvgCountLineCode: 0, AvgCountLineComment: 0, AvgCyclomatic: 0, CommentToCodeRatio: 5
CountClassBase: 1, CountClassCoupled: 0, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 0, CountDeclInstanceVariable: 0, CountDeclMethod: 0, CountDeclMethodAll: 0
CountLine: 32, CountLineBlank: 2, CountLineCode: 5, CountLineCodeDecl: 5, CountLineCodeExe: 4, CountLineComment: 25
CountStmt: 5, CountStmtDecl: 5, CountStmtExe: 4
MaxCyclomatic: 0, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 0
id: 1,112
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineOnlyMLMHead
from torch import nn import torch class CanineOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = CanineLMPredictionHead(config) def forward(self, sequence_output: tuple[torch.Tensor]) -> tuple[torch.Tensor]: prediction_scores = self.predictions(sequence_output) return prediction_scores
class CanineOnlyMLMHead(nn.Module): def __init__(self, config): pass def forward(self, sequence_output: tuple[torch.Tensor]) -> tuple[torch.Tensor]: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 5, AvgCountLineBlank: 0, AvgCountLineCode: 5, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 1, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 1, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 11, CountLineBlank: 1, CountLineCode: 10, CountLineCodeDecl: 8, CountLineCodeExe: 4, CountLineComment: 0
CountStmt: 7, CountStmtDecl: 5, CountStmtExe: 4
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 1,113
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
class_name: transformers.models.canine.modeling_canine.CanineOutput
import torch from torch import nn class CanineOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: tuple[torch.FloatTensor], input_tensor: torch.FloatTensor) -> torch.FloatTensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class CanineOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: tuple[torch.FloatTensor], input_tensor: torch.FloatTensor) -> torch.FloatTensor: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 5, AvgCountLineBlank: 0, AvgCountLineCode: 5, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 12, CountLineBlank: 1, CountLineCode: 11, CountLineCodeDecl: 6, CountLineCodeExe: 8, CountLineComment: 0
CountStmt: 11, CountStmtDecl: 6, CountStmtExe: 8
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
1,114
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
transformers.models.canine.modeling_canine.CaninePooler
from torch import nn
import torch


class CaninePooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: tuple[torch.FloatTensor]) -> torch.FloatTensor:
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
class CaninePooler(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: tuple[torch.FloatTensor]) -> torch.FloatTensor: pass
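CaninePooler implements the usual first-token pooling: take the hidden state at position 0, apply a dense layer, then a tanh. A tiny standalone sketch of that pattern with made-up sizes (the `hidden_size` below is a stand-in, not a real config value):

import torch
from torch import nn

hidden_size = 8                                   # stand-in for config.hidden_size
dense = nn.Linear(hidden_size, hidden_size)
activation = nn.Tanh()

hidden_states = torch.randn(2, 5, hidden_size)    # (batch, seq_len, hidden)
first_token_tensor = hidden_states[:, 0]          # take the [CLS] position
pooled_output = activation(dense(first_token_tensor))
print(pooled_output.shape)                        # torch.Size([2, 8])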
total_program_units: 3, total_doc_str: 0
AvgCountLine: 6, AvgCountLineBlank: 0, AvgCountLineCode: 5, AvgCountLineComment: 1, AvgCyclomatic: 1, CommentToCodeRatio: 0.2
CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 2, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 13, CountLineBlank: 1, CountLineCode: 10, CountLineCodeDecl: 7, CountLineCodeExe: 7, CountLineComment: 2
CountStmt: 10, CountStmtDecl: 7, CountStmtExe: 7
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
1,115
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
transformers.models.canine.modeling_canine.CaninePreTrainedModel
from ...utils import auto_docstring, logging
from torch import nn
from ...modeling_utils import PreTrainedModel
from .configuration_canine import CanineConfig


@auto_docstring
class CaninePreTrainedModel(PreTrainedModel):
    config: CanineConfig
    base_model_prefix = 'canine'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
@auto_docstring class CaninePreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights''' pass
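`_init_weights` is called per submodule when the model's weights are (re)initialized. A self-contained sketch of applying the same scheme to an arbitrary `nn.Module` tree with `Module.apply`; the 0.02 constant stands in for `config.initializer_range`:

from torch import nn

INITIALIZER_RANGE = 0.02  # stand-in for config.initializer_range


def init_weights(module):
    # Same scheme as above: normal init for Linear/Conv1d/Embedding weights,
    # zeros for biases and the padding row, ones/zeros for LayerNorm.
    if isinstance(module, (nn.Linear, nn.Conv1d)):
        module.weight.data.normal_(mean=0.0, std=INITIALIZER_RANGE)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=INITIALIZER_RANGE)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)


toy = nn.Sequential(nn.Embedding(10, 4, padding_idx=0), nn.Linear(4, 4), nn.LayerNorm(4))
toy.apply(init_weights)   # nn.Module.apply visits every submodule
print(toy[2].weight)      # all ones after initialization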
total_program_units: 3, total_doc_str: 1
AvgCountLine: 15, AvgCountLineBlank: 0, AvgCountLineCode: 12, AvgCountLineComment: 3, AvgCyclomatic: 6, CommentToCodeRatio: 0.41
CountClassBase: 1, CountClassCoupled: 0, CountClassCoupledModified: 0, CountClassDerived: 5
CountDeclInstanceMethod: 1, CountDeclInstanceVariable: 0, CountDeclMethod: 1, CountDeclMethodAll: 1
CountLine: 26, CountLineBlank: 2, CountLineCode: 17, CountLineCodeDecl: 6, CountLineCodeExe: 15, CountLineComment: 7
CountStmt: 15, CountStmtDecl: 6, CountStmtExe: 13
MaxCyclomatic: 6, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 6
1,116
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
transformers.models.canine.modeling_canine.CaninePredictionHeadTransform
import torch
from torch import nn
from ...activations import ACT2FN


class CaninePredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: tuple[torch.FloatTensor]) -> torch.FloatTensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states
class CaninePredictionHeadTransform(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: tuple[torch.FloatTensor]) -> torch.FloatTensor: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 7, AvgCountLineBlank: 0, AvgCountLineCode: 7, AvgCountLineComment: 0, AvgCyclomatic: 2, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 15, CountLineBlank: 1, CountLineCode: 14, CountLineCodeDecl: 6, CountLineCodeExe: 11, CountLineComment: 0
CountStmt: 13, CountStmtDecl: 6, CountStmtExe: 10
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
1,117
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
transformers.models.canine.modeling_canine.CanineSelfAttention
from typing import Optional, Union from torch import nn import math import torch class CanineSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) def forward(self, from_tensor: torch.Tensor, to_tensor: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]: batch_size, seq_length, _ = from_tensor.shape key_layer = self.key(to_tensor).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) value_layer = self.value(to_tensor).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) query_layer = self.query(from_tensor).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query': seq_length = from_tensor.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=from_tensor.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) if self.position_embedding_type == 'relative_key': relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == 'relative_key_query': relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding) relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: if attention_mask.ndim == 3: attention_mask = torch.unsqueeze(attention_mask, dim=1) attention_mask = (1.0 - attention_mask.float()) * torch.finfo(attention_scores.dtype).min attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: 
attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs
class CanineSelfAttention(nn.Module): def __init__(self, config): pass def forward(self, from_tensor: torch.Tensor, to_tensor: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor]]: pass
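The forward pass above projects the inputs, reshapes them to `(batch, num_heads, seq_len, head_dim)`, scales the dot products by `1 / sqrt(head_dim)`, and turns the 0/1 attention mask into a large negative additive bias before the softmax. A shape-level sketch of that core path with toy sizes (the relative position terms, used only for the `relative_key*` embedding types, are omitted):

import math
import torch
from torch import nn

batch_size, seq_len, num_heads, head_dim = 2, 6, 4, 8
hidden_size = num_heads * head_dim

hidden = torch.randn(batch_size, seq_len, hidden_size)
q_proj = nn.Linear(hidden_size, hidden_size)
k_proj = nn.Linear(hidden_size, hidden_size)
v_proj = nn.Linear(hidden_size, hidden_size)


def split_heads(x):
    # (batch, seq, hidden) -> (batch, heads, seq, head_dim)
    return x.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)


q, k, v = split_heads(q_proj(hidden)), split_heads(k_proj(hidden)), split_heads(v_proj(hidden))

scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(head_dim)

# 1 = attend, 0 = masked; converted into a large negative additive bias.
attention_mask = torch.ones(batch_size, 1, seq_len, seq_len)
scores = scores + (1.0 - attention_mask) * torch.finfo(scores.dtype).min

probs = nn.functional.softmax(scores, dim=-1)
context = torch.matmul(probs, v).transpose(1, 2).reshape(batch_size, seq_len, hidden_size)
print(context.shape)  # torch.Size([2, 6, 32])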
total_program_units: 3, total_doc_str: 0
AvgCountLine: 32, AvgCountLineBlank: 6, AvgCountLineCode: 22, AvgCountLineComment: 5, AvgCyclomatic: 4, CommentToCodeRatio: 0.21
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 10, CountDeclMethod: 3, CountDeclMethodAll: 13
CountLine: 98, CountLineBlank: 19, CountLineCode: 66, CountLineCodeDecl: 39, CountLineCodeExe: 55, CountLineComment: 14
CountStmt: 55, CountStmtDecl: 32, CountStmtExe: 51
MaxCyclomatic: 8, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 12
1,118
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
transformers.models.canine.modeling_canine.CanineSelfOutput
import torch
from torch import nn


class CanineSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: tuple[torch.FloatTensor], input_tensor: torch.FloatTensor) -> tuple[torch.FloatTensor, torch.FloatTensor]:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class CanineSelfOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: tuple[torch.FloatTensor], input_tensor: torch.FloatTensor) -> tuple[torch.FloatTensor, torch.FloatTensor]: pass
total_program_units: 3, total_doc_str: 0
AvgCountLine: 6, AvgCountLineBlank: 0, AvgCountLineCode: 6, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 0
CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 14, CountLineBlank: 1, CountLineCode: 13, CountLineCodeDecl: 8, CountLineCodeExe: 8, CountLineComment: 0
CountStmt: 11, CountStmtDecl: 6, CountStmtExe: 8
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
1,119
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
transformers.models.canine.modeling_canine.CharactersToMolecules
import torch
from ...activations import ACT2FN
from torch import nn


class CharactersToMolecules(nn.Module):
    """Convert character sequence to initial molecule sequence (i.e. downsample) using strided convolutions."""

    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(in_channels=config.hidden_size, out_channels=config.hidden_size, kernel_size=config.downsampling_rate, stride=config.downsampling_rate)
        self.activation = ACT2FN[config.hidden_act]
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, char_encoding: torch.Tensor) -> torch.Tensor:
        cls_encoding = char_encoding[:, 0:1, :]
        char_encoding = torch.transpose(char_encoding, 1, 2)
        downsampled = self.conv(char_encoding)
        downsampled = torch.transpose(downsampled, 1, 2)
        downsampled = self.activation(downsampled)
        downsampled_truncated = downsampled[:, 0:-1, :]
        result = torch.cat([cls_encoding, downsampled_truncated], dim=1)
        result = self.LayerNorm(result)
        return result
class CharactersToMolecules(nn.Module): '''Convert character sequence to initial molecule sequence (i.e. downsample) using strided convolutions.''' def __init__(self, config): pass def forward(self, char_encoding: torch.Tensor) -> torch.Tensor: pass
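Because the `Conv1d` uses `kernel_size == stride == config.downsampling_rate`, a character sequence of length L is reduced to about L / rate molecules; the [CLS] position is carried over separately and the final conv output is dropped so the lengths match. A toy sketch of the shape arithmetic (all sizes are made up for illustration):

import torch
from torch import nn

hidden_size, downsampling_rate = 16, 4            # stand-ins for config values
conv = nn.Conv1d(hidden_size, hidden_size, kernel_size=downsampling_rate, stride=downsampling_rate)

char_encoding = torch.randn(2, 32, hidden_size)   # (batch, char_seq_len, hidden)
cls_encoding = char_encoding[:, 0:1, :]           # keep [CLS] as its own "molecule"

x = char_encoding.transpose(1, 2)                 # Conv1d expects (batch, channels, length)
downsampled = conv(x).transpose(1, 2)             # (batch, 32 // 4 = 8, hidden)

# Drop the last molecule and prepend [CLS] so the output length stays char_seq_len // rate.
result = torch.cat([cls_encoding, downsampled[:, 0:-1, :]], dim=1)
print(result.shape)                               # torch.Size([2, 8, 16])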
total_program_units: 3, total_doc_str: 1
AvgCountLine: 20, AvgCountLineBlank: 4, AvgCountLineCode: 10, AvgCountLineComment: 7, AvgCyclomatic: 1, CommentToCodeRatio: 0.67
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 44, CountLineBlank: 9, CountLineCode: 21, CountLineCodeDecl: 10, CountLineCodeExe: 18, CountLineComment: 14
CountStmt: 16, CountStmtDecl: 10, CountStmtExe: 13
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
1,120
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/modeling_canine.py
transformers.models.canine.modeling_canine.ConvProjection
from torch import nn
from typing import Optional, Union
import torch
from ...activations import ACT2FN


class ConvProjection(nn.Module):
    """
    Project representations from hidden_size*2 back to hidden_size across a window of w = config.upsampling_kernel_size
    characters.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.conv = nn.Conv1d(in_channels=config.hidden_size * 2, out_channels=config.hidden_size, kernel_size=config.upsampling_kernel_size, stride=1)
        self.activation = ACT2FN[config.hidden_act]
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, inputs: torch.Tensor, final_seq_char_positions: Optional[torch.Tensor]=None) -> torch.Tensor:
        inputs = torch.transpose(inputs, 1, 2)
        pad_total = self.config.upsampling_kernel_size - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        pad = nn.ConstantPad1d((pad_beg, pad_end), 0)
        result = self.conv(pad(inputs))
        result = torch.transpose(result, 1, 2)
        result = self.activation(result)
        result = self.LayerNorm(result)
        result = self.dropout(result)
        final_char_seq = result
        if final_seq_char_positions is not None:
            raise NotImplementedError('CanineForMaskedLM is currently not supported')
        else:
            query_seq = final_char_seq
        return query_seq
class ConvProjection(nn.Module): ''' Project representations from hidden_size*2 back to hidden_size across a window of w = config.upsampling_kernel_size characters. ''' def __init__(self, config): pass def forward(self, inputs: torch.Tensor, final_seq_char_positions: Optional[torch.Tensor]=None) -> torch.Tensor: pass
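The padding in `forward` is classic "same" padding for a stride-1 convolution: `pad_total = kernel_size - 1`, split into `pad_total // 2` at the front and the remainder at the end, so the sequence length is preserved. A small standalone check of that arithmetic (sizes are stand-ins for the config values):

import torch
from torch import nn

hidden_size, kernel_size = 8, 4                  # stand-ins for config.hidden_size, config.upsampling_kernel_size
pad_total = kernel_size - 1
pad_beg, pad_end = pad_total // 2, pad_total - pad_total // 2
print(pad_beg, pad_end)                          # 1 2 for an even kernel

conv = nn.Conv1d(hidden_size * 2, hidden_size, kernel_size=kernel_size, stride=1)
pad = nn.ConstantPad1d((pad_beg, pad_end), 0)

inputs = torch.randn(2, hidden_size * 2, 10)     # (batch, channels = 2 * hidden, seq_len)
out = conv(pad(inputs))
print(out.shape)                                 # torch.Size([2, 8, 10]) -- sequence length preserved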
total_program_units: 3, total_doc_str: 1
AvgCountLine: 25, AvgCountLineBlank: 2, AvgCountLineCode: 17, AvgCountLineComment: 6, AvgCyclomatic: 2, CommentToCodeRatio: 0.47
CountClassBase: 1, CountClassCoupled: 3, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 5, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 56, CountLineBlank: 6, CountLineCode: 34, CountLineCodeDecl: 19, CountLineCodeExe: 27, CountLineComment: 16
CountStmt: 24, CountStmtDecl: 15, CountStmtExe: 21
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 3
1,121
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/tokenization_canine.py
transformers.models.canine.tokenization_canine.CanineTokenizer
from typing import Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer class CanineTokenizer(PreTrainedTokenizer): """ Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of characters, and then converts each character into its Unicode code point. [`CanineTokenizer`] inherits from [`PreTrainedTokenizer`]. Refer to superclass [`PreTrainedTokenizer`] for usage examples and documentation concerning parameters. Args: model_max_length (`int`, *optional*, defaults to 2048): The maximum sentence length the model accepts. """ def __init__(self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self._special_codepoints: dict[str, int] = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): self._special_codepoints[name] = codepoint self._special_codepoint_strings: dict[int, str] = {codepoint: name for name, codepoint in self._special_codepoints.items()} self._unicode_vocab_size = UNICODE_VOCAB_SIZE self._num_special_tokens = len(self._special_codepoints) super().__init__(bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs) @property def vocab_size(self) -> int: return self._unicode_vocab_size def get_vocab(self): vocab = {chr(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> list[str]: """Tokenize a string (i.e. perform character splitting).""" return list(text) def _convert_token_to_id(self, token: str) -> int: """Converts a token (i.e. a Unicode character) in an id (i.e. its integer Unicode code point value).""" try: return ord(token) except TypeError: raise ValueError(f"invalid token: '{token}'") def _convert_id_to_token(self, index: int) -> str: """ Converts a Unicode code point (integer) in a token (str). In case it's a special code point, convert to human-readable format. """ try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(index) except TypeError: raise ValueError(f'invalid id: {index}') def convert_tokens_to_string(self, tokens): return ''.join(tokens) def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CANINE sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. 
Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ sep = [self.sep_token_id] cls = [self.cls_token_id] result = cls + token_ids_0 + sep if token_ids_1 is not None: result += token_ids_1 + sep return result def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) result = [1] + [0] * len(token_ids_0) + [1] if token_ids_1 is not None: result += [0] * len(token_ids_1) + [1] return result def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None): return ()
class CanineTokenizer(PreTrainedTokenizer): ''' Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of characters, and then converts each character into its Unicode code point. [`CanineTokenizer`] inherits from [`PreTrainedTokenizer`]. Refer to superclass [`PreTrainedTokenizer`] for usage examples and documentation concerning parameters. Args: model_max_length (`int`, *optional*, defaults to 2048): The maximum sentence length the model accepts. ''' def __init__(self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs): pass @property def vocab_size(self) -> int: pass def get_vocab(self): pass def _tokenize(self, text: str) -> list[str]: '''Tokenize a string (i.e. perform character splitting).''' pass def _convert_token_to_id(self, token: str) -> int: '''Converts a token (i.e. a Unicode character) in an id (i.e. its integer Unicode code point value).''' pass def _convert_id_to_token(self, index: int) -> str: ''' Converts a Unicode code point (integer) in a token (str). In case it's a special code point, convert to human-readable format. ''' pass def convert_tokens_to_string(self, tokens): pass def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]: ''' Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A CANINE sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. ''' pass def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]: ''' Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. ''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None): pass
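Since tokenization here is plain character splitting followed by `ord()`, and special sequences have the documented `[CLS] X [SEP]` / `[CLS] A [SEP] B [SEP]` layout, the essential behaviour can be sketched in a few lines of ordinary Python. The CLS/SEP code points below are illustrative private-use values, not necessarily the constants defined in `tokenization_canine.py`:

# Plain-Python sketch of the character -> code point tokenization described above.
CLS, SEP = 0xE000, 0xE001   # illustrative private-use code points; see CLS/SEP in tokenization_canine.py


def tokenize(text):
    return [ord(ch) for ch in text]             # mirrors _tokenize + _convert_token_to_id


def build_inputs(ids_a, ids_b=None):
    # [CLS] A [SEP]  or  [CLS] A [SEP] B [SEP]
    out = [CLS] + ids_a + [SEP]
    if ids_b is not None:
        out += ids_b + [SEP]
    return out


ids = build_inputs(tokenize("hi"))
print(ids)                                      # [57344, 104, 105, 57345]
print([chr(i) for i in tokenize("hi")])         # ['h', 'i'] -- decoding is just chr()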
total_program_units: 12, total_doc_str: 6
AvgCountLine: 14, AvgCountLineBlank: 2, AvgCountLineCode: 8, AvgCountLineComment: 5, AvgCyclomatic: 2, CommentToCodeRatio: 0.66
CountClassBase: 1, CountClassCoupled: 8, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 11, CountDeclInstanceVariable: 4, CountDeclMethod: 11, CountDeclMethodAll: 100
CountLine: 184, CountLineBlank: 31, CountLineCode: 92, CountLineCodeDecl: 43, CountLineCodeExe: 62, CountLineComment: 61
CountStmt: 60, CountStmtDecl: 25, CountStmtExe: 48
MaxCyclomatic: 8, MaxInheritanceTree: 3, MaxNesting: 2, SumCyclomatic: 25
1,122
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/configuration_chameleon.py
transformers.models.chameleon.configuration_chameleon.ChameleonConfig
from ...configuration_utils import PretrainedConfig class ChameleonConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`ChameleonModel`]. It is used to instantiate a chameleon model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [meta/chameleon-7B](https://huggingface.co/meta/chameleon-7B). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 65536): Vocabulary size of the chameleon model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ChameleonModel`]; this includes text and image tokens. hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 11008): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 32): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. Chameleon supports up to 4096 tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*): Padding token id. bos_token_id (`int`, *optional*, defaults to 1): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update `max_position_embeddings` to the expected new maximum. 
See the following thread for more information on how these scaling strategies behave: https://www.reddit.com/r/Localchameleon/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an experimental feature, subject to breaking API changes in future versions. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. model_parallel_size (`int`, *optional*, defaults to 1): Number of shards used when training the model. This will be used in qk layernorm because the original Chameleon inference doesn't do reduction in those layers and each rank has its own biases. swin_norm (`bool`, *optional*, defaults to `False`): Use Swin Transformer normalization. vq_config (`dict`, *optional*): ChameleonVQConfig instance containing the configuration for the VQ-VAE model. vocabulary_map (`dict`, *optional*): A dictionary containing the vocabulary map from the tokenizer. Used to obtain tokens from the image inputs. mlp_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers. ```python >>> from transformers import ChameleonModel, ChameleonConfig >>> # Initializing a chameleon chameleon-7b style configuration >>> configuration = ChameleonConfig() >>> # Initializing a model from the chameleon-7b style configuration >>> model = ChameleonModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'chameleon' sub_configs = {'vq_config': ChameleonVQVAEConfig} keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=65536, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=32, hidden_act='silu', max_position_embeddings=4096, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, model_parallel_size=1, swin_norm=False, vq_config=None, vocabulary_map=None, mlp_bias=False, **kwargs): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.mlp_bias = mlp_bias self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self._rope_scaling_validation() self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.model_parallel_size = model_parallel_size self.swin_norm = swin_norm if vq_config is None: vq_config = {} logger.info('vq_config is None. initializing the ChameleonVQConfig with default values.') self.vq_config = ChameleonVQVAEConfig(**vq_config) self.vocabulary_map = vocabulary_map self.image_token_id = vocabulary_map.get('<image>') if vocabulary_map is not None else None super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs) def _rope_scaling_validation(self): """ Validate the `rope_scaling` configuration. 
""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: raise ValueError(f'`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, got {self.rope_scaling}') rope_scaling_type = self.rope_scaling.get('type', None) rope_scaling_factor = self.rope_scaling.get('factor', None) if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']: raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}") if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
class ChameleonConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`ChameleonModel`]. It is used to instantiate a chameleon model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [meta/chameleon-7B](https://huggingface.co/meta/chameleon-7B). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 65536): Vocabulary size of the chameleon model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ChameleonModel`]; this includes text and image tokens. hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 11008): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 32): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. Chameleon supports up to 4096 tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*): Padding token id. bos_token_id (`int`, *optional*, defaults to 1): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 2): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update `max_position_embeddings` to the expected new maximum. 
See the following thread for more information on how these scaling strategies behave: https://www.reddit.com/r/Localchameleon/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an experimental feature, subject to breaking API changes in future versions. attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. model_parallel_size (`int`, *optional*, defaults to 1): Number of shards used when training the model. This will be used in qk layernorm because the original Chameleon inference doesn't do reduction in those layers and each rank has its own biases. swin_norm (`bool`, *optional*, defaults to `False`): Use Swin Transformer normalization. vq_config (`dict`, *optional*): ChameleonVQConfig instance containing the configuration for the VQ-VAE model. vocabulary_map (`dict`, *optional*): A dictionary containing the vocabulary map from the tokenizer. Used to obtain tokens from the image inputs. mlp_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers. ```python >>> from transformers import ChameleonModel, ChameleonConfig >>> # Initializing a chameleon chameleon-7b style configuration >>> configuration = ChameleonConfig() >>> # Initializing a model from the chameleon-7b style configuration >>> model = ChameleonModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=65536, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=32, hidden_act='silu', max_position_embeddings=4096, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, model_parallel_size=1, swin_norm=False, vq_config=None, vocabulary_map=None, mlp_bias=False, **kwargs): pass def _rope_scaling_validation(self): ''' Validate the `rope_scaling` configuration. ''' pass
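`_rope_scaling_validation` only accepts a two-field dict of the form `{"type": "linear" | "dynamic", "factor": <float greater than 1>}`. Building on the usage example already given in the docstring, a hedged sketch of a valid `rope_scaling` value and of inputs the validator would reject:

from transformers import ChameleonConfig

# Valid: two fields, a supported type, and a float factor > 1.
config = ChameleonConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)

# Invalid examples (each should raise ValueError in _rope_scaling_validation):
#   ChameleonConfig(rope_scaling={"type": "linear"})                # missing "factor"
#   ChameleonConfig(rope_scaling={"type": "xpos", "factor": 2.0})   # unsupported type
#   ChameleonConfig(rope_scaling={"type": "dynamic", "factor": 1})  # factor must be a float > 1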
total_program_units: 3, total_doc_str: 2
AvgCountLine: 42, AvgCountLineBlank: 3, AvgCountLineCode: 38, AvgCountLineComment: 2, AvgCyclomatic: 4, CommentToCodeRatio: 1.05
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 1, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 20, CountDeclMethod: 2, CountDeclMethodAll: 2
CountLine: 179, CountLineBlank: 17, CountLineCode: 79, CountLineCodeDecl: 55, CountLineCodeExe: 49, CountLineComment: 83
CountStmt: 41, CountStmtDecl: 28, CountStmtExe: 38
MaxCyclomatic: 5, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 7
1,123
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/configuration_chameleon.py
transformers.models.chameleon.configuration_chameleon.ChameleonVQVAEConfig
from ...configuration_utils import PretrainedConfig from typing import Optional class ChameleonVQVAEConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`ChameleonVQModel`]. It is used to instantiate a `ChameleonVQModel` according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will yield a similar configuration to the VQModel of the [meta/chameleon-7B](https://huggingface.co/meta/chameleon-7B). Args: embed_dim (`int`, *optional*, defaults to 256): Dimensionality of each embedding vector. num_embeddings (`int`, *optional*, defaults to 8192): Number of codebook embeddings. double_latent (`bool`, *optional*, defaults to `False`): Whether to use double z channels. latent_channels (`int`, *optional*, defaults to 256): Number of channels for the latent space. resolution (`int`, *optional*, defaults to 512): Resolution of the input images. in_channels (`int`, *optional*, defaults to 3): Number of input channels. base_channels (`int`, *optional*, defaults to 128): Base channel count. channel_multiplier (`list[int]`, *optional*, defaults to `[1, 1, 2, 2, 4]`): Channel multipliers for each resolution. num_res_blocks (`int`, *optional*, defaults to 2): Number of residual blocks. attn_resolutions (`list[int]`, *optional*): Resolutions to apply attention. dropout (`float`, *optional*, defaults to 0.0): Dropout rate. attn_type (`str`, *optional*, defaults to `"vanilla"`): Attention type used in VQ-GAN encoder. Can be "vanilla" or None. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. """ model_type = 'chameleon_vqgan' base_config_key = 'vq_config' def __init__(self, embed_dim: int=256, num_embeddings: int=8192, double_latent: bool=False, latent_channels: int=256, resolution: int=512, in_channels: int=3, base_channels: int=128, channel_multiplier: list[int]=[1, 1, 2, 2, 4], num_res_blocks: int=2, attn_resolutions: Optional[list[int]]=None, dropout: float=0.0, attn_type: str='vanilla', initializer_range=0.02, **kwargs): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_embeddings = num_embeddings self.double_latent = double_latent self.latent_channels = latent_channels self.resolution = resolution self.in_channels = in_channels self.base_channels = base_channels self.channel_multiplier = channel_multiplier self.num_res_blocks = num_res_blocks self.attn_resolutions = attn_resolutions self.dropout = dropout self.attn_type = attn_type self.initializer_range = initializer_range
class ChameleonVQVAEConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`ChameleonVQModel`]. It is used to instantiate a `ChameleonVQModel` according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will yield a similar configuration to the VQModel of the [meta/chameleon-7B](https://huggingface.co/meta/chameleon-7B). Args: embed_dim (`int`, *optional*, defaults to 256): Dimensionality of each embedding vector. num_embeddings (`int`, *optional*, defaults to 8192): Number of codebook embeddings. double_latent (`bool`, *optional*, defaults to `False`): Whether to use double z channels. latent_channels (`int`, *optional*, defaults to 256): Number of channels for the latent space. resolution (`int`, *optional*, defaults to 512): Resolution of the input images. in_channels (`int`, *optional*, defaults to 3): Number of input channels. base_channels (`int`, *optional*, defaults to 128): Base channel count. channel_multiplier (`list[int]`, *optional*, defaults to `[1, 1, 2, 2, 4]`): Channel multipliers for each resolution. num_res_blocks (`int`, *optional*, defaults to 2): Number of residual blocks. attn_resolutions (`list[int]`, *optional*): Resolutions to apply attention. dropout (`float`, *optional*, defaults to 0.0): Dropout rate. attn_type (`str`, *optional*, defaults to `"vanilla"`): Attention type used in VQ-GAN encoder. Can be "vanilla" or None. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. ''' def __init__(self, embed_dim: int=256, num_embeddings: int=8192, double_latent: bool=False, latent_channels: int=256, resolution: int=512, in_channels: int=3, base_channels: int=128, channel_multiplier: list[int]=[1, 1, 2, 2, 4], num_res_blocks: int=2, attn_resolutions: Optional[list[int]]=None, dropout: float=0.0, attn_type: str='vanilla', initializer_range=0.02, **kwargs): pass
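A short sketch of building the VQ-VAE sub-config directly and through the parent config's `vq_config` argument; the import path follows the `file_path` recorded for this class, and the only non-default value is the smaller codebook chosen for illustration:

from transformers.models.chameleon.configuration_chameleon import ChameleonConfig, ChameleonVQVAEConfig

# Stand-alone sub-config, overriding only the codebook size.
vq = ChameleonVQVAEConfig(num_embeddings=4096)
print(vq.embed_dim, vq.num_embeddings, vq.channel_multiplier)  # 256 4096 [1, 1, 2, 2, 4]

# The parent config accepts the same settings as a plain dict under `vq_config`.
config = ChameleonConfig(vq_config={"num_embeddings": 4096})
print(config.vq_config.num_embeddings)  # 4096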
total_program_units: 2, total_doc_str: 1
AvgCountLine: 31, AvgCountLineBlank: 0, AvgCountLineCode: 31, AvgCountLineComment: 0, AvgCyclomatic: 1, CommentToCodeRatio: 1.03
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 1, CountDeclInstanceVariable: 13, CountDeclMethod: 1, CountDeclMethodAll: 1
CountLine: 72, CountLineBlank: 3, CountLineCode: 34, CountLineCodeDecl: 33, CountLineCodeExe: 16, CountLineComment: 35
CountStmt: 18, CountStmtDecl: 17, CountStmtExe: 16
MaxCyclomatic: 1, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 1
1,124
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/image_processing_chameleon.py
transformers.models.chameleon.image_processing_chameleon.ChameleonImageProcessor
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging import numpy as np from ...image_transforms import get_resize_output_image_size, resize, to_channel_dimension_format from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from typing import Optional, Union from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments class ChameleonImageProcessor(BaseImageProcessor): """ Constructs a Chameleon image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 512}`): Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to 1): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`dict[str, int]` *optional*, defaults to {"height": 512, "width": 512}): Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to 0.0078): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `[1.0, 1.0, 1.0]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `[1.0, 1.0, 1.0]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. 
""" model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PIL.Image.LANCZOS, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=0.0078, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'shortest_edge': 512} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {'height': 512, 'width': 512} crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size') self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [1.0, 1.0, 1.0] self.image_std = image_std if image_std is not None else [1.0, 1.0, 1.0] self.do_convert_rgb = do_convert_rgb def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ default_to_square = True if 'shortest_edge' in size: size = size['shortest_edge'] default_to_square = False elif 'height' in size and 'width' in size: size = (size['height'], size['width']) else: raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. 
Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, param_name='size', default_to_square=False) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = self.fetch_images(images) images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) if do_convert_rgb: images = [self.blend_rgba(image) for image in images] images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) all_images = [] for image in images: if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) all_images.append(image) images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors) def blend_rgba(self, image: ImageInput) -> ImageInput: """ Convert image to RGB by blending the transparency layer if it's in RGBA format. If image is not `PIL.Image`, it si simply returned without modifications. Args: image (`ImageInput`): Image to convert. """ if not isinstance(image, PIL.Image.Image): return image elif image.mode == 'RGB': return image img_rgba = np.array(image.convert('RGBA')) if not (img_rgba[:, :, 3] < 255).any(): return image.convert('RGB') alpha = img_rgba[:, :, 3] / 255.0 img_rgb = (1 - alpha[:, :, np.newaxis]) * 255 + alpha[:, :, np.newaxis] * img_rgba[:, :, :3] return PIL.Image.fromarray(img_rgb.astype('uint8'), 'RGB')
class ChameleonImageProcessor(BaseImageProcessor): ''' Constructs a Chameleon image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 512}`): Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to 1): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`dict[str, int]` *optional*, defaults to {"height": 512, "width": 512}): Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to 0.0078): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `[1.0, 1.0, 1.0]`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `[1.0, 1.0, 1.0]`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. ''' def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PIL.Image.LANCZOS, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=0.0078, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: pass def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. 
data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. ''' pass @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: ''' Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
- Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. ''' pass def blend_rgba(self, image: ImageInput) -> ImageInput: ''' Convert image to RGB by blending the transparency layer if it's in RGBA format. If image is not `PIL.Image`, it is simply returned without modifications. Args: image (`ImageInput`): Image to convert. ''' pass
6
4
62
5
38
19
8
0.74
1
8
2
0
4
11
4
24
295
27
154
64
109
114
77
24
72
21
3
2
33
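For orientation, a tiny sketch of the shortest-edge resizing rule the `resize` docstring above describes; the helper name and example sizes are illustrative, not part of the processor.

```python
def shortest_edge_size(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    # Scale so the shorter side becomes `shortest_edge` while keeping the aspect ratio,
    # which is the rule ChameleonImageProcessor.resize documents.
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)


print(shortest_edge_size(600, 800, 512))  # (512, 683)
```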
1,125
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonAttention
from typing import Callable, Optional, Union import torch from .configuration_chameleon import ChameleonConfig, ChameleonVQVAEConfig from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel import torch.nn.functional as F from torch import nn from ...cache_utils import Cache, DynamicCache from ...utils.deprecation import deprecate_kwarg class ChameleonAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: ChameleonConfig, layer_idx: Optional[int]=None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self.model_parallel_size = config.model_parallel_size self.scaling = self.head_dim ** (-0.5) if self.head_dim * self.num_heads != self.hidden_size: raise ValueError(f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).') self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias) self.q_norm = ChameleonLayerNorm((self.num_heads, self.head_dim)) self.k_norm = ChameleonLayerNorm((self.num_key_value_heads, self.head_dim)) self._init_rope() def _init_rope(self): if self.config.rope_scaling is None: self.rotary_emb = ChameleonRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, base=self.rope_theta) else: scaling_type = self.config.rope_scaling['type'] scaling_factor = self.config.rope_scaling['factor'] if scaling_type == 'linear': self.rotary_emb = ChameleonLinearScalingRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta) elif scaling_type == 'dynamic': self.rotary_emb = ChameleonDynamicNTKScalingRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor, base=self.rope_theta) else: raise ValueError(f'Unknown RoPE scaling type {scaling_type}') @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = 
self.v_proj(hidden_states) query_states = query_states.reshape(-1, self.num_heads, self.head_dim) query_states = self.q_norm(query_states) key_states = key_states.reshape(-1, self.num_key_value_heads, self.head_dim) key_states = self.k_norm(key_states) query_states = query_states.reshape(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.reshape(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = self.rotary_emb(value_states, position_ids) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) return (attn_output, attn_weights)
class ChameleonAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, config: ChameleonConfig, layer_idx: Optional[int]=None): pass def _init_rope(self): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: bool=False, use_cache: bool=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: pass
5
1
41
6
35
1
4
0.06
1
11
6
2
3
19
3
13
131
20
106
44
92
6
69
34
65
5
1
2
12
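For reference, a bare-bones sketch of the eager scaled-dot-product path the attention record above falls back to when no optimized backend is selected. This is a generic illustration with made-up shapes, not the exact `eager_attention_forward` helper, and it omits masking and dropout.

```python
import torch

def eager_attention(q, k, v, scaling: float, num_kv_groups: int = 1):
    # q: (batch, heads, seq, dim); k and v: (batch, kv_heads, seq, dim).
    # Repeat the key/value heads for grouped-query attention, then apply
    # softmax(q @ k^T * scaling) @ v without masking or dropout.
    k = k.repeat_interleave(num_kv_groups, dim=1)
    v = v.repeat_interleave(num_kv_groups, dim=1)
    attn = torch.softmax(q @ k.transpose(-2, -1) * scaling, dim=-1)
    return attn @ v

b, h, h_kv, s, d = 1, 8, 2, 5, 16
q = torch.randn(b, h, s, d)
k = torch.randn(b, h_kv, s, d)
v = torch.randn(b, h_kv, s, d)
print(eager_attention(q, k, v, d ** -0.5, num_kv_groups=h // h_kv).shape)  # torch.Size([1, 8, 5, 16])
```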
1,126
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonDecoderLayer
import torch.nn.functional as F from ...utils.deprecation import deprecate_kwarg from ...modeling_layers import GradientCheckpointingLayer from typing import Callable, Optional, Union from ...cache_utils import Cache, DynamicCache from .configuration_chameleon import ChameleonConfig, ChameleonVQVAEConfig import torch class ChameleonDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: ChameleonConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = ChameleonAttention(config=config, layer_idx=layer_idx) self.mlp = ChameleonMLP(config) self.input_layernorm = ChameleonRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = ChameleonRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Cache`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs
class ChameleonDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: ChameleonConfig, layer_idx: int): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Cache`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model ''' pass
4
1
36
5
21
11
2
0.5
1
8
4
0
2
5
2
12
73
10
42
21
29
21
23
11
20
3
1
1
4
1,127
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonDynamicNTKScalingRotaryEmbedding
import torch import torch.nn.functional as F class ChameleonDynamicNTKScalingRotaryEmbedding(ChameleonRotaryEmbedding): """ChameleonRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla""" def forward(self, x, position_ids): seq_len = torch.max(position_ids) + 1 if seq_len > self.max_position_embeddings: base = self.base * (self.scaling_factor * seq_len / self.max_position_embeddings - (self.scaling_factor - 1)) ** (self.dim / (self.dim - 2)) inv_freq = 1.0 / base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).to(device=x.device, dtype=torch.float) / self.dim) self.register_buffer('inv_freq', inv_freq, persistent=False) cos, sin = super().forward(x, position_ids) return (cos, sin)
class ChameleonDynamicNTKScalingRotaryEmbedding(ChameleonRotaryEmbedding): '''ChameleonRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla''' def forward(self, x, position_ids): pass
2
1
14
1
12
2
2
0.23
1
1
0
0
1
1
1
13
17
2
13
7
11
3
9
6
7
2
2
1
2
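A small standalone sketch of the dynamic NTK rescaling performed in the record above: once the sequence exceeds `max_position_embeddings`, the RoPE base is enlarged before the inverse frequencies are recomputed. The numbers in the usage line are illustrative.

```python
import torch

def ntk_scaled_inv_freq(base: float, dim: int, seq_len: int,
                        max_position_embeddings: int, scaling_factor: float) -> torch.Tensor:
    # When the current sequence is longer than the trained context, enlarge the RoPE
    # base (the dynamic NTK rule above) before recomputing the inverse frequencies.
    if seq_len > max_position_embeddings:
        base = base * (scaling_factor * seq_len / max_position_embeddings - (scaling_factor - 1)) ** (dim / (dim - 2))
    return 1.0 / base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)

print(ntk_scaled_inv_freq(10000.0, 128, 8192, 4096, 2.0).shape)  # torch.Size([64])
```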
1,128
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonForConditionalGeneration
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from ...processing_utils import Unpack from ...cache_utils import Cache, DynamicCache from torch import nn from ...generation import GenerationMixin import torch.nn.functional as F import torch from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from typing import Callable, Optional, Union @auto_docstring(custom_intro='\n Chameleon Model with a head on top used for outputting logits for next token prediction.\n ') class ChameleonForConditionalGeneration(ChameleonPreTrainedModel, GenerationMixin): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.model = ChameleonModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.post_init() def get_image_tokens(self, pixel_values): return self.model.get_image_tokens(pixel_values) def get_image_features(self, pixel_values): return self.model.get_image_features(pixel_values) @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithPast]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import ChameleonProcessor, ChameleonForConditionalGeneration >>> import torch >>> import requests >>> from PIL import Image >>> model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", dtype=torch.bfloat16) >>> processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b") >>> prompt = "I used to know a lot about constellations when I was younger, but as I grew older, I forgot most of what I knew. These are the only two constellations that I really remember now.<image><image>I would like for you to tell me about 3 more constellations and give me a little bit of history about the constellation." 
>>> image = Image.open(requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw) >>> image_2 = Image.open(requests.get("https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg", stream=True).raw) >>> inputs = processor(images=[image, image_2], text=prompt, return_tensors="pt").to(model.device, torch.bfloat16) >>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False) >>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs) hidden_states = outputs[0] logits = self.lm_head(hidden_states) image_tokens = self.model.vocabulary_mapping.image_tokens logits[:, :, image_tokens] = torch.finfo(logits.dtype).min loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def prepare_inputs_for_generation(self, input_ids, pixel_values=None, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs): model_inputs = super().prepare_inputs_for_generation(input_ids, pixel_values=pixel_values, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, cache_position=cache_position, position_ids=position_ids, use_cache=use_cache, **kwargs) if cache_position[0] != 0: model_inputs['pixel_values'] = None return model_inputs
@auto_docstring(custom_intro='\n Chameleon Model with a head on top used for outputting logits for next token prediction.\n ') class ChameleonForConditionalGeneration(ChameleonPreTrainedModel, GenerationMixin): def __init__(self, config): pass def get_image_tokens(self, pixel_values): pass def get_image_features(self, pixel_values): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithPast]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import ChameleonProcessor, ChameleonForConditionalGeneration >>> import torch >>> import requests >>> from PIL import Image >>> model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", dtype=torch.bfloat16) >>> processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b") >>> prompt = "I used to know a lot about constellations when I was younger, but as I grew older, I forgot most of what I knew. These are the only two constellations that I really remember now.<image><image>I would like for you to tell me about 3 more constellations and give me a little bit of history about the constellation." >>> image = Image.open(requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw) >>> image_2 = Image.open(requests.get("https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg", stream=True).raw) >>> inputs = processor(images=[image, image_2], text=prompt, return_tensors="pt").to(model.device, torch.bfloat16) >>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False) >>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0] ```''' pass def prepare_inputs_for_generation(self, input_ids, pixel_values=None, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs): pass
9
1
20
2
13
5
3
0.37
2
6
3
0
9
3
9
10
194
28
125
50
88
46
63
24
53
10
2
3
24
1,129
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonImageVocabularyMapping
import torch.nn.functional as F from functools import cached_property import torch class ChameleonImageVocabularyMapping: """ A class for mapping discrete image tokens from VQGAN to BPE tokens. """ def __init__(self, vocab_map): self.vocab_map = vocab_map self.image_token_id = vocab_map.get('<image>') @cached_property def val2name(self): return {v: k for k, v in self.vocab_map.items()} @cached_property def image_tokens(self): return sorted([val for name, val in self.vocab_map.items() if name.startswith('IMGIMG')]) @cached_property def bpe2img(self): img_tkn_chr_mapping = {chr(ord('A') + i): str(i) for i in range(10)} def remap(old_name: str) -> str: return ''.join((img_tkn_chr_mapping.get(c, c) for c in old_name[len('IMGIMG'):-1])) return {tok: int(remap(self.val2name[tok])) for tok in self.image_tokens} @cached_property def img2bpe(self): return {v: k for k, v in self.bpe2img.items()} @cached_property def bpe2img_search_tensors(self): return (torch.tensor(sorted(self.bpe2img.keys())), torch.tensor(sorted(self.bpe2img.values()))) @cached_property def img2bpe_mapping_tensor(self): mapping = torch.zeros(max(self.img2bpe.keys()) + 1, dtype=torch.int) for k, v in self.img2bpe.items(): mapping[k] = v return mapping def convert_img2bpe(self, img_batch: torch.Tensor) -> torch.Tensor: device = img_batch.device img_tokens = self.img2bpe_mapping_tensor[img_batch.to('cpu')] return img_tokens.to(device)
class ChameleonImageVocabularyMapping: ''' A class for mapping discrete image tokens from VQGAN to BPE tokens. ''' def __init__(self, vocab_map): pass @cached_property def val2name(self): pass @cached_property def image_tokens(self): pass @cached_property def bpe2img(self): pass def remap(old_name: str) -> str: pass @cached_property def img2bpe(self): pass @cached_property def bpe2img_search_tensors(self): pass @cached_property def img2bpe_mapping_tensor(self): pass def convert_img2bpe(self, img_batch: torch.Tensor) -> torch.Tensor: pass
16
1
3
0
3
0
1
0.09
0
4
0
0
8
2
8
8
45
10
32
24
16
3
26
17
16
2
0
1
10
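An illustrative sketch of the dense lookup-tensor trick that `img2bpe_mapping_tensor` and `convert_img2bpe` in the record above rely on; the toy token ids are invented for the example.

```python
import torch

# Toy image-token -> BPE-token map standing in for ChameleonImageVocabularyMapping.img2bpe.
img2bpe = {3: 100, 7: 101, 9: 102}

# Build a dense lookup tensor once so a whole batch can be remapped with a single indexing op.
mapping = torch.zeros(max(img2bpe.keys()) + 1, dtype=torch.int)
for k, v in img2bpe.items():
    mapping[k] = v

img_batch = torch.tensor([[3, 9], [7, 3]])
print(mapping[img_batch])  # tensor([[100, 102], [101, 100]], dtype=torch.int32)
```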
1,130
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonLayerNorm
from torch import nn import torch.nn.functional as F class ChameleonLayerNorm(nn.LayerNorm): """ LayerNorm but computes stats only over the last dim because Chameleon applies gamma and beta from each shard separately to each head, instead of reducing. We can apply each head's own gamma/beta by repeat-interleaving weights from each shard, but the stats have to be computed in the last dimension. This module applies gamma/beta manually to fulfill this requirement. """ def __init__(self, hidden_size, *args, **kwargs): super().__init__(hidden_size, *args, **kwargs) self.normalized_shape = (hidden_size[-1],) def forward(self, hidden_states): hidden_states = F.layer_norm(hidden_states, self.normalized_shape, None, None, eps=1e-05) hidden_states = hidden_states * self.weight + self.bias return hidden_states
class ChameleonLayerNorm(nn.LayerNorm): ''' LayerNorm but computes stats only over the last dim because Chameleon applies gamma and beta from each shard separately to each head, instead of reducing. We can apply each head's own gamma/beta by repeat-interleaving weights from each shard, but the stats have to be computed in the last dimension. This module applies gamma/beta manually to fulfill this requirement. ''' def __init__(self, hidden_size, *args, **kwargs): pass def forward(self, hidden_states): pass
3
1
4
0
4
0
1
0.75
1
1
0
0
2
1
2
2
16
2
8
4
5
6
8
4
5
1
1
0
2
1,131
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonLinearScalingRotaryEmbedding
class ChameleonLinearScalingRotaryEmbedding(ChameleonRotaryEmbedding): """ChameleonRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" def forward(self, x, position_ids): position_ids = position_ids.float() / self.scaling_factor cos, sin = super().forward(x, position_ids) return (cos, sin)
class ChameleonLinearScalingRotaryEmbedding(ChameleonRotaryEmbedding): '''ChameleonRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev''' def forward(self, x, position_ids): pass
2
1
5
0
4
1
1
0.4
1
1
0
0
1
0
1
13
8
1
5
3
3
2
5
3
3
1
2
0
1
1,132
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonMLP
from torch import nn from ...activations import ACT2FN class ChameleonMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj
class ChameleonMLP(nn.Module): def __init__(self, config): pass def forward(self, x): pass
3
0
6
0
6
0
1
0.08
1
1
0
0
2
7
2
12
15
1
13
11
10
1
13
11
10
1
1
0
2
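A tiny functional sketch of the gated feed-forward computation in the ChameleonMLP record above, assuming `hidden_act` resolves to SiLU (an assumption, not read from the config); the random weights stand in for the three projection layers.

```python
import torch
import torch.nn.functional as F

def gated_mlp(x, w_gate, w_up, w_down):
    # down_proj(act(gate_proj(x)) * up_proj(x)), as in ChameleonMLP.forward,
    # with SiLU assumed for the activation.
    return (F.silu(x @ w_gate) * (x @ w_up)) @ w_down

hidden, inter = 8, 32
x = torch.randn(2, 5, hidden)
out = gated_mlp(x, torch.randn(hidden, inter), torch.randn(hidden, inter), torch.randn(inter, hidden))
print(out.shape)  # torch.Size([2, 5, 8])
```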
1,133
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonModel
from .configuration_chameleon import ChameleonConfig, ChameleonVQVAEConfig from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...cache_utils import Cache, DynamicCache from ...processing_utils import Unpack import torch import torch.nn.functional as F from typing import Callable, Optional, Union from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...masking_utils import create_causal_mask from torch import nn @auto_docstring class ChameleonModel(ChameleonPreTrainedModel): def __init__(self, config: ChameleonConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.vocabulary_mapping = ChameleonImageVocabularyMapping(config.vocabulary_map) decoder_layer = ChameleonDecoderLayer if not self.config.swin_norm else ChameleonSwinDecoderLayer self.layers = nn.ModuleList([decoder_layer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]) self.norm = ChameleonRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.vqmodel = ChameleonVQVAE._from_config(config.vq_config) self.gradient_checkpointing = False self.post_init() def get_image_tokens(self, pixel_values: torch.FloatTensor): """ Tokenizes images into discrete tokens with VQGAN module. Converts obtained image tokens into BPE tokens and wraps with "boi" and "eoi" special tokens. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)): The tensors corresponding to the input images. """ batch_size = pixel_values.shape[0] _, _, image_toks = self.vqmodel.encode(pixel_values) bpe_toks = self.vocabulary_mapping.convert_img2bpe(image_toks) bpe_toks = bpe_toks.view(batch_size, -1) return bpe_toks def get_image_features(self, pixel_values: torch.FloatTensor): """ Tokenizes images into discrete tokens with VQGAN module and embeds them with text embeddings layer Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)): The tensors corresponding to the input images. """ image_tokens = self.get_image_tokens(pixel_values) vision_embeddings = self.get_input_embeddings()(image_tokens) return vision_embeddings def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor): """ Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is equal to the length of multimodal features. If the lengths are different, an error is raised. 
""" if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()(torch.tensor(self.vocabulary_mapping.image_token_id, dtype=torch.long, device=inputs_embeds.device)) special_image_mask = special_image_mask.all(-1) else: special_image_mask = input_ids == self.vocabulary_mapping.image_token_id n_image_tokens = special_image_mask.sum() special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) n_image_features = image_features.shape[0] * image_features.shape[1] if inputs_embeds[special_image_mask].numel() != image_features.numel(): raise ValueError(f'Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}') return special_image_mask @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`.') use_cache = False if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You must specify exactly one of input_ids or inputs_embeds') if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if pixel_values is not None: image_embeds = self.get_image_features(pixel_values) special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_embeds) if use_cache and past_key_values is None and (not torch.jit.is_tracing()): past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids) hidden_states = inputs_embeds all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None)) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
@auto_docstring class ChameleonModel(ChameleonPreTrainedModel): def __init__(self, config: ChameleonConfig): pass def get_image_tokens(self, pixel_values: torch.FloatTensor): ''' Tokenizes images into discrete tokens with VQGAN module. Converts obtained image tokens into BPE tokens and wraps with "boi" and "eoi" special tokens. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)): The tensors corresponding to the input images. ''' pass def get_image_features(self, pixel_values: torch.FloatTensor): ''' Tokenizes images into discrete tokens with VQGAN module and embeds them with text embeddings layer Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)): The tensors corresponding to the input images. ''' pass def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor): ''' Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is equal to the length of multimodal features. If the lengths are different, an error is raised. ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, BaseModelOutputWithPast]: pass
8
3
40
5
30
6
6
0.24
1
18
11
0
6
8
7
8
306
40
216
75
171
51
114
44
106
26
2
2
44
1,134
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonPreTrainedModel
from .configuration_chameleon import ChameleonConfig, ChameleonVQVAEConfig from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel @auto_docstring class ChameleonPreTrainedModel(PreTrainedModel): config: ChameleonConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['ChameleonDecoderLayer', 'ChameleonSwinDecoderLayer'] _skip_keys_device_placement = ['past_key_values', 'causal_mask'] _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True _supports_param_buffer_assignment = False _supports_flex_attn = True _supports_attention_backend = True
@auto_docstring class ChameleonPreTrainedModel(PreTrainedModel): pass
2
0
12
0
12
0
6
0
1
1
1
4
1
0
1
1
25
1
24
14
22
0
22
14
20
6
1
2
6
1,135
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonRMSNorm
import torch.nn.functional as F from torch import nn import torch class ChameleonRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): """ ChameleonRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
class ChameleonRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-06): ''' ChameleonRMSNorm is equivalent to T5LayerNorm ''' pass def forward(self, hidden_states): pass def extra_repr(self): pass
4
1
5
0
4
1
1
0.23
1
2
0
0
3
2
3
13
18
2
13
8
9
3
13
8
9
1
1
0
3
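A minimal standalone restatement of the RMS normalization that ChameleonRMSNorm above implements, kept numerically faithful (stats in float32, cast back afterwards); shapes are illustrative.

```python
import torch

def rms_norm(hidden_states: torch.Tensor, weight: torch.Tensor, eps: float = 1e-06) -> torch.Tensor:
    # Mean of squares over the last dimension, reciprocal square root, rescale by the
    # learned weight; stats are computed in float32 and the result cast back, as above.
    input_dtype = hidden_states.dtype
    hidden_states = hidden_states.to(torch.float32)
    variance = hidden_states.pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps)
    return weight * hidden_states.to(input_dtype)

x = torch.randn(2, 4, 8)
print(rms_norm(x, torch.ones(8)).shape)  # torch.Size([2, 4, 8])
```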
1,136
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonRotaryEmbedding
from torch import nn import torch import torch.nn.functional as F class ChameleonRotaryEmbedding(nn.Module): inv_freq: torch.Tensor def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): super().__init__() self.scaling_factor = scaling_factor self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / self.dim) self.register_buffer('inv_freq', inv_freq, persistent=False) self.max_seq_len_cached = max_position_embeddings @torch.no_grad() def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type device_type = device_type if device_type != 'mps' else 'cpu' with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
class ChameleonRotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): pass @torch.no_grad() def forward(self, x, position_ids): pass
4
0
12
0
10
2
2
0.18
1
3
0
2
2
5
2
12
27
1
22
17
18
4
21
16
18
2
1
1
3
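A compact sketch of how the rotary embedding above builds its cos/sin tables from inverse frequencies; it mirrors the math of `forward` without the autocast and device handling, and the shapes are illustrative.

```python
import torch

def rope_cos_sin(dim: int, position_ids: torch.Tensor, base: float = 10000.0):
    # Inverse frequencies over the even dimensions, an outer product with the positions,
    # then the half-table duplicated, mirroring ChameleonRotaryEmbedding.forward.
    inv_freq = 1.0 / base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim)
    freqs = position_ids[:, :, None].float() * inv_freq[None, None, :]
    emb = torch.cat((freqs, freqs), dim=-1)
    return emb.cos(), emb.sin()

cos, sin = rope_cos_sin(8, torch.arange(5)[None, :])
print(cos.shape, sin.shape)  # torch.Size([1, 5, 8]) torch.Size([1, 5, 8])
```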
1,137
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonSwinDecoderLayer
from .configuration_chameleon import ChameleonConfig, ChameleonVQVAEConfig from ...modeling_layers import GradientCheckpointingLayer from typing import Callable, Optional, Union from ...cache_utils import Cache, DynamicCache import torch.nn.functional as F import torch from ...utils.deprecation import deprecate_kwarg class ChameleonSwinDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: ChameleonConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = ChameleonAttention(config=config, layer_idx=layer_idx) self.mlp = ChameleonMLP(config) self.input_layernorm = ChameleonRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = ChameleonRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings past_key_values (`Cache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. """ residual = hidden_states hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states = self.input_layernorm(hidden_states) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.mlp(hidden_states) hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs
class ChameleonSwinDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: ChameleonConfig, layer_idx: int): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings past_key_values (`Cache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. ''' pass
4
1
35
4
21
11
2
0.5
1
8
4
0
2
5
2
12
71
8
42
21
29
21
23
11
20
3
1
1
4
1,138
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonVQVAE
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging import torch.nn.functional as F from .configuration_chameleon import ChameleonConfig, ChameleonVQVAEConfig import torch @auto_docstring(custom_intro='\n The VQ-VAE model used in Chameleon for encoding/decoding images into discrete tokens.\n This model follows the "Make-a-scene: Scene-based text-to-image generation with human priors" paper from\n [ Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv\n Taigman](https://huggingface.co/papers/2203.13131).\n ') class ChameleonVQVAE(ChameleonPreTrainedModel): config: ChameleonVQVAEConfig _no_split_modules = ['ChameleonVQVAEVectorQuantizer', 'ChameleonVQVAEEncoderAttnBlock', 'ChameleonVQVAEEncoderResnetBlock'] def __init__(self, config: ChameleonVQVAEConfig): super().__init__(config) self.encoder = ChameleonVQVAEEncoder(config) self.quantize = ChameleonVQVAEVectorQuantizer(config) self.quant_conv = torch.nn.Conv2d(config.latent_channels, config.embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(config.embed_dim, config.latent_channels, 1) self.eval() def encode(self, pixel_values: torch.LongTensor): hidden_states = self.encoder(pixel_values) hidden_states = self.quant_conv(hidden_states) quant, emb_loss, indices = self.quantize(hidden_states) return (quant, emb_loss, indices)
@auto_docstring(custom_intro='\n The VQ-VAE model used in Chameleon for encoding/decoding images into discrete tokens.\n This model follows the "Make-a-scene: Scene-based text-to-image generation with human priors" paper from\n [ Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv\n Taigman](https://huggingface.co/papers/2203.13131).\n ') class ChameleonVQVAE(ChameleonPreTrainedModel): def __init__(self, config: ChameleonVQVAEConfig): pass def encode(self, pixel_values: torch.LongTensor): pass
4
0
8
0
8
0
2
0.04
1
4
3
0
3
4
3
4
30
4
26
13
22
1
24
13
20
5
2
2
7
1,139
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonVQVAEEncoder
from torch import nn import torch.nn.functional as F import torch class ChameleonVQVAEEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_resolutions = len(config.channel_multiplier) self.num_res_blocks = config.num_res_blocks base_channels = config.base_channels resolution = config.resolution in_channels = config.in_channels double_latent = config.double_latent latent_channels = config.latent_channels channel_multiplier = config.channel_multiplier self.conv_in = torch.nn.Conv2d(in_channels, base_channels, kernel_size=3, stride=1, padding=1) curr_res = resolution in_channel_multiplier = (1,) + tuple(channel_multiplier) self.in_channel_multiplier = in_channel_multiplier self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = base_channels * in_channel_multiplier[i_level] block_out = base_channels * channel_multiplier[i_level] for i_block in range(self.num_res_blocks): block.append(ChameleonVQVAEEncoderResnetBlock(config=config, in_channels=block_in, out_channels=block_out)) block_in = block_out if config.attn_resolutions is not None and curr_res in config.attn_resolutions and (config.attn_type == 'vanilla'): attn.append(ChameleonVQVAEEncoderAttnBlock(block_in)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions - 1: down.downsample = ChameleonVQVAEEncoderConvDownsample(block_in) curr_res = curr_res // 2 self.down.append(down) self.mid = nn.Module() self.mid.block_1 = ChameleonVQVAEEncoderResnetBlock(config=config, in_channels=block_in, out_channels=block_in) self.mid.attn_1 = ChameleonVQVAEEncoderAttnBlock(block_in) if config.attn_type == 'vanilla' else nn.Identity() self.mid.block_2 = ChameleonVQVAEEncoderResnetBlock(config=config, in_channels=block_in, out_channels=block_in) self.norm_out = torch.nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-06, affine=True) self.conv_out = torch.nn.Conv2d(block_in, 2 * latent_channels if double_latent else latent_channels, kernel_size=3, stride=1, padding=1) def forward(self, pixel_values: torch.LongTensor): hidden_states = [self.conv_in(pixel_values)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): hidden_state = self.down[i_level].block[i_block](hidden_states[-1]) if len(self.down[i_level].attn) > 0: hidden_state = self.down[i_level].attn[i_block](hidden_state) hidden_states.append(hidden_state) if i_level != self.num_resolutions - 1: hidden_states.append(self.down[i_level].downsample(hidden_states[-1])) last_hidden_state = hidden_states[-1] last_hidden_state = self.mid.block_1(last_hidden_state) last_hidden_state = self.mid.attn_1(last_hidden_state) last_hidden_state = self.mid.block_2(last_hidden_state) last_hidden_state = self.norm_out(last_hidden_state) last_hidden_state *= torch.sigmoid(last_hidden_state) last_hidden_state = self.conv_out(last_hidden_state) return last_hidden_state
class ChameleonVQVAEEncoder(nn.Module): def __init__(self, config): pass def forward(self, pixel_values: torch.LongTensor): pass
3
0
47
4
41
2
6
0.04
1
6
3
0
2
8
2
12
95
9
83
31
80
3
57
31
54
7
1
3
12
1,140
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonVQVAEEncoderAttnBlock
from torch import nn import torch import torch.nn.functional as F class ChameleonVQVAEEncoderAttnBlock(nn.Module): def __init__(self, in_channels): super().__init__() self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-06, affine=True) self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) def forward(self, hidden_states): residual = hidden_states hidden_states = self.norm(hidden_states) query_states = self.q(hidden_states) key_states = self.k(hidden_states) value_states = self.v(hidden_states) batch_size, channels, height, width = query_states.shape query_states = query_states.reshape(batch_size, channels, height * width).permute(0, 2, 1) key_states = key_states.reshape(batch_size, channels, height * width) attn_weights = torch.bmm(query_states, key_states) attn_weights = attn_weights * int(channels) ** (-0.5) attn_weights = F.softmax(attn_weights, dim=2) value_states = value_states.reshape(batch_size, channels, height * width) attn_weights = attn_weights.permute(0, 2, 1) attn_output = torch.bmm(value_states, attn_weights).reshape(batch_size, channels, height, width) attn_output = self.proj_out(attn_output) return residual + attn_output
class ChameleonVQVAEEncoderAttnBlock(nn.Module): def __init__(self, in_channels): pass def forward(self, hidden_states): pass
3
0
16
2
13
1
1
0.08
1
2
0
0
2
6
2
12
33
5
26
16
23
2
26
16
23
1
1
0
2
1,141
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonVQVAEEncoderConvDownsample
from torch import nn import torch.nn.functional as F class ChameleonVQVAEEncoderConvDownsample(nn.Module): def __init__(self, in_channels): super().__init__() self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) def forward(self, hidden_states): hidden_states = F.pad(hidden_states, pad=(0, 1, 0, 1), mode='constant', value=0) hidden_states = self.conv(hidden_states) return hidden_states
class ChameleonVQVAEEncoderConvDownsample(nn.Module): def __init__(self, in_channels): pass def forward(self, hidden_states): pass
3
0
4
0
4
1
1
0.13
1
1
0
1
2
1
2
12
10
1
8
4
5
1
8
4
5
1
1
0
2
1,142
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonVQVAEEncoderResnetBlock
import torch from torch import nn import torch.nn.functional as F class ChameleonVQVAEEncoderResnetBlock(nn.Module): def __init__(self, config, in_channels, out_channels=None, conv_shortcut=False): super().__init__() self.in_channels = in_channels self.out_channels = in_channels if out_channels is None else out_channels self.use_conv_shortcut = conv_shortcut self.norm1 = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-06, affine=True) self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) self.norm2 = torch.nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=1e-06, affine=True) self.dropout = torch.nn.Dropout(config.dropout) self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) if self.in_channels != self.out_channels: if self.use_conv_shortcut: self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) else: self.nin_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) def forward(self, hidden_states): residual = hidden_states hidden_states = self.norm1(hidden_states) hidden_states *= torch.sigmoid(hidden_states) hidden_states = self.conv1(hidden_states) hidden_states = self.norm2(hidden_states) hidden_states *= torch.sigmoid(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.conv2(hidden_states) if self.in_channels != self.out_channels: if self.use_conv_shortcut: residual = self.conv_shortcut(residual) else: residual = self.nin_shortcut(residual) return residual + hidden_states
class ChameleonVQVAEEncoderResnetBlock(nn.Module): def __init__(self, config, in_channels, out_channels=None, conv_shortcut=False): pass def forward(self, hidden_states): pass
3
0
20
2
18
0
4
0
1
1
0
0
2
10
2
12
42
5
37
20
28
0
29
14
26
4
1
2
7
1,143
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/modeling_chameleon.py
transformers.models.chameleon.modeling_chameleon.ChameleonVQVAEVectorQuantizer
import torch from torch import nn import torch.nn.functional as F class ChameleonVQVAEVectorQuantizer(nn.Module): """ A module for vector quantization using learned embedding vectors. This module implements the quantization process similar to the one described in the VQ-VAE (Vector Quantized Variational AutoEncoder) paper. It quantizes continuous input vectors into discrete codebook vectors, which are learned during training. Current implementation improves over previous ones by avoiding costly matrix multiplications and allowing for post-hoc remapping of indices. """ def __init__(self, config): super().__init__() self.num_embeddings = config.num_embeddings self.embedding_dim = config.embed_dim self.beta = getattr(config, 'beta', 0.25) self.embedding = nn.Embedding(self.num_embeddings, self.embedding_dim) def forward(self, hidden_state: torch.Tensor): hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous() hidden_state_flattened = hidden_state.view(-1, self.embedding_dim) distances = torch.sum(hidden_state_flattened ** 2, dim=1, keepdim=True) + torch.sum(self.embedding.weight ** 2, dim=1) - 2 * torch.einsum('bd,dn->bn', hidden_state_flattened, self.embedding.weight.transpose(0, 1)) min_encoding_indices = torch.argmin(distances, dim=1) hidden_state_quant = self.embedding(min_encoding_indices).view(hidden_state.shape) loss = torch.mean((hidden_state_quant.detach() - hidden_state) ** 2) + self.beta * torch.mean((hidden_state_quant - hidden_state.detach()) ** 2) hidden_state_quant = hidden_state + (hidden_state_quant - hidden_state).detach() hidden_state_quant = hidden_state_quant.permute(0, 3, 1, 2).contiguous() return (hidden_state_quant, loss, min_encoding_indices)
class ChameleonVQVAEVectorQuantizer(nn.Module): ''' A module for vector quantization using learned embedding vectors. This module implements the quantization process similar to the one described in the VQ-VAE (Vector Quantized Variational AutoEncoder) paper. It quantizes continuous input vectors into discrete codebook vectors, which are learned during training. Current implementation improves over previous ones by avoiding costly matrix multiplications and allowing for post-hoc remapping of indices. ''' def __init__(self, config): pass def forward(self, hidden_state: torch.Tensor): pass
3
1
17
4
12
2
1
0.5
1
2
0
0
2
5
2
12
46
10
24
13
21
12
18
13
15
1
1
0
2
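The docstring in the ChameleonVQVAEVectorQuantizer record above describes two ideas: a nearest-neighbour lookup into a learned codebook and a straight-through gradient trick. The standalone sketch below reproduces both steps on random data; all names (`codebook`, `z`, `z_q`) are illustrative and not taken from the Chameleon code.

```python
import torch
from torch import nn

codebook = nn.Embedding(num_embeddings=8, embedding_dim=4)   # learned codebook
z = torch.randn(2, 4, 3, 3, requires_grad=True)              # encoder output (B, C, H, W)

# Squared L2 distance of every spatial vector to every codebook entry,
# expanded as |a|^2 + |b|^2 - 2ab to avoid materialising pairwise differences.
flat = z.permute(0, 2, 3, 1).reshape(-1, 4)                              # (B*H*W, C)
dist = (flat**2).sum(1, keepdim=True) + (codebook.weight**2).sum(1) - 2 * flat @ codebook.weight.t()
idx = dist.argmin(dim=1)                                                 # nearest code per vector
z_q = codebook(idx).view(2, 3, 3, 4).permute(0, 3, 1, 2)                 # back to (B, C, H, W)

# Straight-through estimator: the forward pass uses z_q, but gradients flow to z as if
# quantization were the identity - the `z + (z_q - z).detach()` trick used in the record above.
z_q = z + (z_q - z).detach()
```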
1,144
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/processing_chameleon.py
transformers.models.chameleon.processing_chameleon.ChameleonProcessor
import numpy as np from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack from typing import Optional, Union from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput class ChameleonProcessor(ProcessorMixin): """ Constructs a Chameleon processor which wraps a Chameleon image processor and a Chameleon tokenizer into a single processor. [`ChameleonProcessor`] offers all the functionalities of [`ChameleonImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~ChameleonProcessor.__call__`] and [`~ChameleonProcessor.decode`] for more information. Args: image_processor ([`ChameleonImageProcessor`]): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`]): The tokenizer is a required input. image_seq_length (`int`, *optional*, defaults to 1024): Sequence length of one image embedding. image_token (`str`, *optional*, defaults to `"<image>"`): The special token used to indicate image in the text. """ attributes = ['image_processor', 'tokenizer'] tokenizer_class = ('LlamaTokenizer', 'LlamaTokenizerFast') image_processor_class = 'ChameleonImageProcessor' def __init__(self, image_processor, tokenizer, image_seq_length: int=1024, image_token: str='<image>'): self.image_seq_length = image_seq_length self.image_token = tokenizer.image_token if hasattr(tokenizer, 'image_token') else image_token self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token) self.image_start_token = tokenizer.boi_token if hasattr(tokenizer, 'boi_token') else '<racm3:break>' self.image_end_token = tokenizer.eoi_token if hasattr(tokenizer, 'eoi_token') else '<eoss>' self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token) self.image_start_token_id = tokenizer.convert_tokens_to_ids(self.image_start_token) self.image_end_token_id = tokenizer.convert_tokens_to_ids(self.image_end_token) self.image_ids = [self.image_token_id, self.image_start_token_id, self.image_end_token_id] super().__init__(image_processor, tokenizer) def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[ChameleonProcessorKwargs]) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). 
return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ if isinstance(text, str): text = [text] elif not isinstance(text, list) and (not isinstance(text[0], str)): raise TypeError('Invalid input text. Please provide a string, or a list of strings') if text is None and images is None: raise ValueError('You must provide either text or images') output_kwargs = self._merge_kwargs(ChameleonProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) return_for_text_completion = output_kwargs['text_kwargs'].pop('return_for_text_completion', False) prompt_strings = [] one_img_tokens = self.image_start_token + self.image_token * self.image_seq_length + self.image_end_token for sample in text: sample = sample.replace(self.image_token, one_img_tokens) if not return_for_text_completion: sample += self.tokenizer.sep_token prompt_strings.append(sample) image_inputs = {} if images is not None: image_inputs = self.image_processor(images, **output_kwargs['images_kwargs']) return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None) return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', False) text_inputs = self.tokenizer(prompt_strings, **output_kwargs['text_kwargs'], return_tensors=None) self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=['image']) if return_mm_token_type_ids: array_ids = np.array(text_inputs['input_ids']) mm_token_type_ids = np.zeros_like(text_inputs['input_ids']) mm_token_type_ids[np.isin(array_ids, self.image_ids)] = 1 text_inputs['mm_token_type_ids'] = mm_token_type_ids.tolist() return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors) def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. """ vision_data = {} if image_sizes is not None: num_image_tokens = [self.image_seq_length + 2] * len(image_sizes) num_image_patches = [1] * len(image_sizes) vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches}) return MultiModalData(**vision_data)
class ChameleonProcessor(ProcessorMixin): ''' Constructs a Chameleon processor which wraps a Chameleon image processor and a Chameleon tokenizer into a single processor. [`ChameleonProcessor`] offers all the functionalities of [`ChameleonImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~ChameleonProcessor.__call__`] and [`~ChameleonProcessor.decode`] for more information. Args: image_processor ([`ChameleonImageProcessor`]): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`]): The tokenizer is a required input. image_seq_length (`int`, *optional*, defaults to 1024): Sequence length of one image embedding. image_token (`str`, *optional*, defaults to `"<image>"`): The special token used to indicate image in the text. ''' def __init__(self, image_processor, tokenizer, image_seq_length: int=1024, image_token: str='<image>'): pass def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[ChameleonProcessorKwargs]) -> BatchFeature: ''' Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. ''' pass def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): ''' Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. ''' pass
4
3
19
2
10
8
3
1.07
1
9
2
0
5
4
5
22
128
18
54
30
40
58
39
22
33
7
2
2
14
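The `__call__` docstring in the ChameleonProcessor record above explains how text and images are combined; a typical invocation looks like the sketch below. The checkpoint name and image URL are assumptions for illustration, not part of the record.

```python
import requests
from PIL import Image
from transformers import ChameleonProcessor

processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")  # assumed checkpoint
image = Image.open(requests.get("https://example.com/cat.png", stream=True).raw)  # placeholder URL

# Each "<image>" placeholder in the text is expanded to
# image_start_token + image_seq_length * image_token + image_end_token before tokenization.
inputs = processor(images=image, text="What is shown in this <image>?", return_tensors="pt")
print(inputs.keys())  # typically input_ids, attention_mask and pixel_values
```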
1,145
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/processing_chameleon.py
transformers.models.chameleon.processing_chameleon.ChameleonProcessorKwargs
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack class ChameleonProcessorKwargs(ProcessingKwargs, total=False): text_kwargs: ChameleonTextKwargs _defaults = {'text_kwargs': {'padding': False, 'return_for_text_completion': False, 'return_mm_token_type_ids': False}, 'common_kwargs': {'return_tensors': 'pt'}}
class ChameleonProcessorKwargs(ProcessingKwargs, total=False): pass
1
0
0
0
0
0
0
0
2
0
0
0
0
0
0
0
11
0
11
2
10
0
3
2
2
0
3
0
0
1,146
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chameleon/processing_chameleon.py
transformers.models.chameleon.processing_chameleon.ChameleonTextKwargs
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack class ChameleonTextKwargs(TextKwargs, total=False): return_for_text_completion: bool
class ChameleonTextKwargs(TextKwargs, total=False): pass
1
0
0
0
0
0
0
0
2
0
0
0
0
0
0
0
2
0
2
1
1
0
2
1
1
0
2
0
0
1,147
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/configuration_chinese_clip.py
transformers.models.chinese_clip.configuration_chinese_clip.ChineseCLIPConfig
from ...configuration_utils import PretrainedConfig class ChineseCLIPConfig(PretrainedConfig): """ [`ChineseCLIPConfig`] is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate Chinese-CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Chinese-CLIP [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`ChineseCLIPTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`ChineseCLIPVisionConfig`]. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original ChineseCLIP implementation. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import ChineseCLIPConfig, ChineseCLIPModel >>> # Initializing a ChineseCLIPConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration >>> configuration = ChineseCLIPConfig() >>> # Initializing a ChineseCLIPModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration >>> model = ChineseCLIPModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a ChineseCLIPConfig from a ChineseCLIPTextConfig and a ChineseCLIPVisionConfig >>> # Initializing a ChineseCLIPTextConfig and ChineseCLIPVisionConfig configuration >>> config_text = ChineseCLIPTextConfig() >>> config_vision = ChineseCLIPVisionConfig() >>> config = ChineseCLIPConfig.from_text_vision_configs(config_text, config_vision) ```""" model_type = 'chinese_clip' sub_configs = {'text_config': ChineseCLIPTextConfig, 'vision_config': ChineseCLIPVisionConfig} def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs): text_config_dict = kwargs.pop('text_config_dict', None) vision_config_dict = kwargs.pop('vision_config_dict', None) super().__init__(**kwargs) if text_config_dict is not None: if text_config is None: text_config = {} _text_config_dict = ChineseCLIPTextConfig(**text_config_dict).to_dict() for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and (key not in ['transformers_version']): if key in text_config_dict: message = f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict["{key}"]` will be used instead.' else: message = f'`text_config_dict` is provided which will be used to initialize `ChineseCLIPTextConfig`. The value `text_config["{key}"]` will be overridden.' 
logger.info(message) text_config.update(_text_config_dict) if vision_config_dict is not None: if vision_config is None: vision_config = {} _vision_config_dict = ChineseCLIPVisionConfig(**vision_config_dict).to_dict() if 'id2label' in _vision_config_dict: _vision_config_dict['id2label'] = {str(key): value for key, value in _vision_config_dict['id2label'].items()} for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and (key not in ['transformers_version']): if key in vision_config_dict: message = f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different values. The value `vision_config_dict["{key}"]` will be used instead.' else: message = f'`vision_config_dict` is provided which will be used to initialize `ChineseCLIPVisionConfig`. The value `vision_config["{key}"]` will be overridden.' logger.info(message) vision_config.update(_vision_config_dict) if text_config is None: text_config = {} logger.info('`text_config` is `None`. Initializing the `ChineseCLIPTextConfig` with default values.') if vision_config is None: vision_config = {} logger.info('`vision_config` is `None`. initializing the `ChineseCLIPVisionConfig` with default values.') self.text_config = ChineseCLIPTextConfig(**text_config) self.vision_config = ChineseCLIPVisionConfig(**vision_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = 1.0 self.initializer_range = 0.02
class ChineseCLIPConfig(PretrainedConfig): ''' [`ChineseCLIPConfig`] is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate Chinese-CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Chinese-CLIP [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`ChineseCLIPTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`ChineseCLIPVisionConfig`]. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original ChineseCLIP implementation. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import ChineseCLIPConfig, ChineseCLIPModel >>> # Initializing a ChineseCLIPConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration >>> configuration = ChineseCLIPConfig() >>> # Initializing a ChineseCLIPModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration >>> model = ChineseCLIPModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a ChineseCLIPConfig from a ChineseCLIPTextConfig and a ChineseCLIPVisionConfig >>> # Initializing a ChineseCLIPTextConfig and ChineseCLIPVisionConfig configuration >>> config_text = ChineseCLIPTextConfig() >>> config_vision = ChineseCLIPVisionConfig() >>> config = ChineseCLIPConfig.from_text_vision_configs(config_text, config_vision) ```''' def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs): pass
2
1
49
7
31
11
8
0.86
1
4
2
0
1
6
2
2
150
27
66
22
58
57
45
17
42
14
1
4
15
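The `__init__` in the ChineseCLIPConfig record above reconciles the legacy `text_config_dict`/`vision_config_dict` arguments with `text_config`/`vision_config`. A minimal sketch of the resulting precedence (values are illustrative):

```python
from transformers import ChineseCLIPConfig

# When both forms are passed, the legacy *_config_dict value wins and an
# informational message is logged for each conflicting key.
config = ChineseCLIPConfig(
    text_config={"hidden_size": 512},
    text_config_dict={"hidden_size": 768},
)
print(config.text_config.hidden_size)  # 768 - taken from text_config_dict
print(config.projection_dim)           # 512 - the default
```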
1,148
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/configuration_chinese_clip.py
transformers.models.chinese_clip.configuration_chinese_clip.ChineseCLIPOnnxConfig
from ...onnx import OnnxConfig from collections import OrderedDict from collections.abc import Mapping from typing import TYPE_CHECKING, Any class ChineseCLIPOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('attention_mask', {0: 'batch', 1: 'sequence'})]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('logits_per_image', {0: 'batch'}), ('logits_per_text', {0: 'batch'}), ('text_embeds', {0: 'batch'}), ('image_embeds', {0: 'batch'})]) @property def atol_for_validation(self) -> float: return 0.0001 def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=-1, seq_length: int=-1) -> Mapping[str, Any]: text_input_dict = super().generate_dummy_inputs(processor.tokenizer, batch_size=batch_size, seq_length=seq_length) image_input_dict = super().generate_dummy_inputs(processor.image_processor, batch_size=batch_size) return {**text_input_dict, **image_input_dict} @property def default_onnx_opset(self) -> int: return 14
class ChineseCLIPOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def outputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def atol_for_validation(self) -> float: pass def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=-1, seq_length: int=-1) -> Mapping[str, Any]: pass @property def default_onnx_opset(self) -> int: pass
10
0
7
0
7
0
1
0
1
6
0
0
5
0
5
5
44
4
40
18
24
0
13
8
7
1
1
0
5
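The ChineseCLIPOnnxConfig record above declares the ONNX input/output axes and builds dummy text-plus-image inputs from a processor. A rough sketch of exercising it, assuming the class above is in scope and using the `OFA-Sys/chinese-clip-vit-base-patch16` processor (the exact set of returned keys depends on the tokenizer):

```python
from transformers import AutoProcessor, ChineseCLIPConfig

onnx_config = ChineseCLIPOnnxConfig.from_model_config(ChineseCLIPConfig())
processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

dummy_inputs = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=16)
print(sorted(dummy_inputs))             # includes input_ids, attention_mask and pixel_values
print(onnx_config.default_onnx_opset)   # 14
```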
1,149
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/configuration_chinese_clip.py
transformers.models.chinese_clip.configuration_chinese_clip.ChineseCLIPTextConfig
from ...configuration_utils import PretrainedConfig class ChineseCLIPTextConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate a Chinese CLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Chinese CLIP [OFA-Sys/chinese-clip-vit-base-patch16](https: //huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the CHINESE_CLIP model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ChineseCLIPModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`ChineseCLIPModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
Only relevant if `config.is_decoder=True`. Example: ```python >>> from transformers import ChineseCLIPTextConfig, ChineseCLIPTextModel >>> # Initializing a ChineseCLIPTextConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration >>> configuration = ChineseCLIPTextConfig() >>> # Initializing a ChineseCLIPTextModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration >>> model = ChineseCLIPTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'chinese_clip_text_model' base_config_key = 'text_config' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, initializer_factor=1.0, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, **kwargs): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache
class ChineseCLIPTextConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate a Chinese CLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Chinese CLIP [OFA-Sys/chinese-clip-vit-base-patch16](https: //huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the CHINESE_CLIP model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ChineseCLIPModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`ChineseCLIPModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. pad_token_id (`int`, *optional*, defaults to 0): Padding token id. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. 
Example: ```python >>> from transformers import ChineseCLIPTextConfig, ChineseCLIPTextModel >>> # Initializing a ChineseCLIPTextConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration >>> configuration = ChineseCLIPTextConfig() >>> # Initializing a ChineseCLIPTextModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration >>> model = ChineseCLIPTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, initializer_factor=1.0, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, **kwargs): pass
2
1
37
1
36
0
1
1.54
1
1
0
0
1
15
1
1
110
11
39
38
18
60
20
19
18
1
1
0
1
1,150
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/configuration_chinese_clip.py
transformers.models.chinese_clip.configuration_chinese_clip.ChineseCLIPVisionConfig
from ...configuration_utils import PretrainedConfig class ChineseCLIPVisionConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate an ChineseCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ChineseCLIP [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): The number of input channels. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). 
Example: ```python >>> from transformers import ChineseCLIPVisionConfig, ChineseCLIPVisionModel >>> # Initializing a ChineseCLIPVisionConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration >>> configuration = ChineseCLIPVisionConfig() >>> # Initializing a ChineseCLIPVisionModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration >>> model = ChineseCLIPVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'chinese_clip_vision_model' base_config_key = 'vision_config' def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act
class ChineseCLIPVisionConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`ChineseCLIPModel`]. It is used to instantiate an ChineseCLIP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ChineseCLIP [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): The number of input channels. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import ChineseCLIPVisionConfig, ChineseCLIPVisionModel >>> # Initializing a ChineseCLIPVisionConfig with OFA-Sys/chinese-clip-vit-base-patch16 style configuration >>> configuration = ChineseCLIPVisionConfig() >>> # Initializing a ChineseCLIPVisionModel (with random weights) from the OFA-Sys/chinese-clip-vit-base-patch16 style configuration >>> model = ChineseCLIPVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs): pass
2
1
32
1
31
0
1
1.35
1
1
0
0
1
13
1
1
89
9
34
33
16
46
18
17
16
1
1
0
1
1,151
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/feature_extraction_chinese_clip.py
transformers.models.chinese_clip.feature_extraction_chinese_clip.ChineseCLIPFeatureExtractor
import warnings from .image_processing_chinese_clip import ChineseCLIPImageProcessor from ...utils.import_utils import requires @requires(backends=('vision',)) class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn('The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use ChineseCLIPImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs)
@requires(backends=('vision',)) class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor): def __init__(self, *args, **kwargs) -> None: pass
3
0
7
0
7
0
1
0
1
2
0
0
1
0
1
24
8
0
8
2
6
0
4
2
2
1
4
0
1
1,152
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/image_processing_chinese_clip.py
transformers.models.chinese_clip.image_processing_chinese_clip.ChineseCLIPImageProcessor
from ...utils.import_utils import requires from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments import numpy as np from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from typing import Optional, Union from ...image_transforms import convert_to_rgb, get_resize_output_image_size, resize, to_channel_dimension_format @requires(backends=('vision',)) class ChineseCLIPImageProcessor(BaseImageProcessor): """ Constructs a Chinese-CLIP image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`dict[str, int]` *optional*, defaults to 224): Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. 
""" model_input_names = ['pixel_values'] def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: super().__init__(**kwargs) size = size if size is not None else {'shortest_edge': 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224} crop_size = get_size_dict(crop_size) self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ size = get_size_dict(size, default_to_square=False) output_size = get_resize_output_image_size(image, size=(size['height'], size['width']), default_to_square=False, input_data_format=input_data_format) return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. 
size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) all_images = [] for image in images: if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) all_images.append(image) images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors)
@requires(backends=('vision',)) class ChineseCLIPImageProcessor(BaseImageProcessor): ''' Constructs a Chinese-CLIP image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`dict[str, int]` *optional*, defaults to 224): Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. ''' def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None: pass def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. 
data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. ''' pass @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: ''' Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
- Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. ''' pass
6
3
71
5
43
23
9
0.78
1
8
2
1
3
11
3
23
257
20
133
59
89
104
62
19
58
21
3
2
27
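The `ChineseCLIPImageProcessor` skeleton above resizes by shortest edge while keeping the aspect ratio: the shorter side becomes `size["shortest_edge"]` and the longer side is scaled to match. As a quick illustration of that sizing rule only — not the library's actual `resize` implementation; the helper name `shortest_edge_size` and the rounding choice are assumptions — here is a minimal sketch:

```python
# Minimal sketch (not the library code): the shortest-edge resizing rule
# described in the ChineseCLIPImageProcessor docstring above.
def shortest_edge_size(height: int, width: int, shortest_edge: int = 224) -> tuple[int, int]:
    """Return (new_height, new_width) so the shorter side equals `shortest_edge`
    and the aspect ratio is preserved."""
    if height <= width:
        scale = shortest_edge / height
        return shortest_edge, round(width * scale)
    scale = shortest_edge / width
    return round(height * scale), shortest_edge

# A 480x640 (H x W) image: the height is the shorter side, so it becomes 224
# and the width scales to keep the aspect ratio.
print(shortest_edge_size(480, 640))  # (224, 299)
```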
1,153
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPModel
from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from typing import Any, Callable, Optional, Union from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int from torch import nn @auto_docstring class ChineseCLIPModel(ChineseCLIPPreTrainedModel): config: ChineseCLIPConfig def __init__(self, config: ChineseCLIPConfig): super().__init__(config) if not isinstance(config.text_config, ChineseCLIPTextConfig): raise TypeError(f'config.text_config is expected to be of type ChineseCLIPTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.vision_config, ChineseCLIPVisionConfig): raise TypeError(f'config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type {type(config.vision_config)}.') text_config = config.text_config vision_config = config.vision_config vision_config._attn_implementation = config._attn_implementation self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False) self.vision_model = ChineseCLIPVisionTransformer(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) self.post_init() @filter_out_non_signature_kwargs() @auto_docstring def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor: """ Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the final [CLS] hidden state of Text-Transformer. Examples: ```python >>> import torch >>> from transformers import AutoTokenizer, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt") >>> with torch.inference_mode(): ... text_features = model.get_text_features(**inputs) >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) ```""" text_outputs: BaseModelOutputWithPooling = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids) pooled_output = text_outputs.pooler_output text_features = self.text_projection(pooled_output) return text_features @filter_out_non_signature_kwargs() @auto_docstring def get_image_features(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor: """ Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the final [CLS] hidden state of Vision-Transformer. 
Examples: ```python >>> import torch >>> from transformers import AutoProcessor, ChineseCLIPModel >>> from transformers.image_utils import load_image >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = load_image(url) >>> inputs = processor(images=image, return_tensors="pt") >>> with torch.inference_mode(): ... image_features = model.get_image_features(**inputs) >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) ```""" vision_outputs: BaseModelOutputWithPooling = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) pooled_output = vision_outputs.pooler_output image_features = self.visual_projection(pooled_output) return image_features @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, ChineseCLIPOutput]: """ return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. Examples: ```python >>> import torch >>> from transformers import AutoProcessor, ChineseCLIPModel >>> from transformers.image_utils import load_image >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = load_image(url) >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True) >>> with torch.inference_mode(): ... 
outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[0][:, 0, :] text_embeds = self.text_projection(text_embeds) image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = chinese_clip_loss(logits_per_text) return ChineseCLIPOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)
@auto_docstring class ChineseCLIPModel(ChineseCLIPPreTrainedModel): def __init__(self, config: ChineseCLIPConfig): pass @filter_out_non_signature_kwargs() @auto_docstring def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor: ''' Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the final [CLS] hidden state of Text-Transformer. Examples: ```python >>> import torch >>> from transformers import AutoTokenizer, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt") >>> with torch.inference_mode(): ... text_features = model.get_text_features(**inputs) >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) ```''' pass @filter_out_non_signature_kwargs() @auto_docstring def get_image_features(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor: ''' Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the final [CLS] hidden state of Vision-Transformer. Examples: ```python >>> import torch >>> from transformers import AutoProcessor, ChineseCLIPModel >>> from transformers.image_utils import load_image >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = load_image(url) >>> inputs = processor(images=image, return_tensors="pt") >>> with torch.inference_mode(): ... image_features = model.get_image_features(**inputs) >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) ```''' pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, ChineseCLIPOutput]: ''' return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. Examples: ```python >>> import torch >>> from transformers import AutoProcessor, ChineseCLIPModel >>> from transformers.image_utils import load_image >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = load_image(url) >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True) >>> with torch.inference_mode(): ... 
outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```''' pass
12
3
56
10
34
14
5
0.39
1
11
6
0
4
8
4
5
235
42
140
63
103
54
60
32
55
8
2
2
19
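`ChineseCLIPModel.forward` above computes CLIP-style similarities: the projected text and image embeddings are L2-normalized, multiplied, and scaled by `logit_scale.exp()`. A small self-contained sketch of just that arithmetic, with random tensors standing in for the projected embeddings and the CLIP-style initial value 2.6592 assumed for `logit_scale`:

```python
import torch

# Illustrative sketch of the similarity computation in ChineseCLIPModel.forward,
# using random tensors in place of the projected text/image embeddings.
batch_text, batch_image, dim = 4, 2, 512
text_embeds = torch.randn(batch_text, dim)
image_embeds = torch.randn(batch_image, dim)
logit_scale = torch.tensor(2.6592).exp()  # assumed CLIP-style logit_scale_init_value

# L2-normalize, then scaled dot products give text-image similarity scores.
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale   # (4, 2)
logits_per_image = logits_per_text.t()                                        # (2, 4)
probs = logits_per_image.softmax(dim=1)  # per-image probabilities over the 4 texts
print(probs.shape)                       # torch.Size([2, 4])
```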
1,154
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPOutput
from dataclasses import dataclass
import torch
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions
from typing import Any, Callable, Optional, Union

@dataclass
@auto_docstring
class ChineseCLIPOutput(ModelOutput):
    """
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`ChineseCLIPTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The image embeddings obtained by applying the projection layer to the pooled output of [`ChineseCLIPVisionModel`].
    text_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
        The output of the [`ChineseCLIPTextModel`].
    vision_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
        The output of the [`ChineseCLIPVisionModel`].
    """
    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
    vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None

    def to_tuple(self) -> tuple[Any]:
        return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))
@dataclass
@auto_docstring
class ChineseCLIPOutput(ModelOutput):
    '''
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`ChineseCLIPTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The image embeddings obtained by applying the projection layer to the pooled output of [`ChineseCLIPVisionModel`].
    text_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
        The output of the [`ChineseCLIPTextModel`].
    vision_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
        The output of the [`ChineseCLIPVisionModel`].
    '''
    def to_tuple(self) -> tuple[Any]:
        pass
4
1
5
0
5
0
2
1.62
1
2
0
0
1
0
1
1
36
2
13
9
11
21
10
9
8
2
1
0
2
1,155
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPPreTrainedModel
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from torch import nn from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig @auto_docstring class ChineseCLIPPreTrainedModel(PreTrainedModel): config: ChineseCLIPConfig base_model_prefix = 'chinese_clip' supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, ChineseCLIPVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim ** (-0.5) * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, ChineseCLIPTextEmbeddings): nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range) for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]: if embedding.padding_idx is not None: embedding.weight.data[embedding.padding_idx].zero_() elif isinstance(module, ChineseCLIPVisionAttention): factor = self.config.initializer_factor in_proj_std = module.embed_dim ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor out_proj_std = module.embed_dim ** (-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, ChineseCLIPVisionMLP): factor = self.config.initializer_factor in_proj_std = module.config.hidden_size ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor fc_std = (2 * module.config.hidden_size) ** (-0.5) * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, ChineseCLIPModel): nn.init.normal_(module.text_projection.weight, std=module.text_embed_dim ** (-0.5) * self.config.initializer_factor) nn.init.normal_(module.visual_projection.weight, std=module.vision_embed_dim ** (-0.5) * self.config.initializer_factor) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_()
@auto_docstring class ChineseCLIPPreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights''' pass
3
1
46
1
44
1
11
0.1
1
5
5
3
1
0
1
1
56
3
48
10
46
5
38
10
36
11
1
3
11
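`ChineseCLIPPreTrainedModel._init_weights` above scales the initialization standard deviations by the hidden size, the depth, and `initializer_factor`. A worked numeric example of those formulas, assuming illustrative ViT-B-like values (hidden size 768, 12 layers, factor 1.0), which are not necessarily the real Chinese-CLIP configuration:

```python
# Worked example of the scaled initialization used in
# ChineseCLIPPreTrainedModel._init_weights (illustrative sizes only).
embed_dim, num_hidden_layers, factor = 768, 12, 1.0

in_proj_std = embed_dim ** -0.5 * (2 * num_hidden_layers) ** -0.5 * factor  # q/k/v projections
out_proj_std = embed_dim ** -0.5 * factor                                   # attention output projection
fc_std = (2 * embed_dim) ** -0.5 * factor                                   # first MLP layer

print(f"q/k/v proj std: {in_proj_std:.5f}")   # ~0.00737
print(f"out proj std:   {out_proj_std:.5f}")  # ~0.03608
print(f"fc1 std:        {fc_std:.5f}")        # ~0.02552
```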
1,156
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPTextAttention
from typing import Any, Callable, Optional, Union from torch import nn import torch from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer class ChineseCLIPTextAttention(nn.Module): def __init__(self, config): super().__init__() self.self = ChineseCLIPTextSelfAttention(config) self.output = ChineseCLIPTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, **kwargs) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs
class ChineseCLIPTextAttention(nn.Module): def __init__(self, config): pass def prune_heads(self, heads): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]: pass
4
0
15
1
14
1
1
0.07
1
5
1
0
3
3
3
13
49
4
43
20
30
3
22
11
18
2
1
1
4
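`prune_heads` in `ChineseCLIPTextAttention` above removes whole attention heads by slicing the query/key/value projections and the output projection. A simplified sketch of that slicing — it ignores the bookkeeping for already-pruned heads that `find_pruneable_heads_and_indices` performs, and the sizes are illustrative:

```python
import torch
from torch import nn

# Sketch of head pruning: drop whole attention heads by keeping only the
# q/k/v output rows (and output-dense input columns) of the retained heads.
hidden_size, num_heads = 768, 12
head_size = hidden_size // num_heads          # 64
heads_to_prune = {0, 7}                       # e.g. heads judged redundant
kept = [h for h in range(num_heads) if h not in heads_to_prune]

# Hidden-unit indices belonging to the kept heads.
index = torch.cat([torch.arange(h * head_size, (h + 1) * head_size) for h in kept])

query = nn.Linear(hidden_size, hidden_size)
pruned_query = nn.Linear(hidden_size, len(index))
pruned_query.weight.data = query.weight.data[index].clone()    # keep selected output rows
pruned_query.bias.data = query.bias.data[index].clone()

dense = nn.Linear(hidden_size, hidden_size)                     # self-output projection
pruned_dense = nn.Linear(len(index), hidden_size)
pruned_dense.weight.data = dense.weight.data[:, index].clone()  # keep selected input columns

print(pruned_query.weight.shape, pruned_dense.weight.shape)     # (640, 768) and (768, 640)
```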
1,157
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPTextEmbeddings
import torch from typing import Any, Callable, Optional, Union from torch import nn class ChineseCLIPTextEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings
class ChineseCLIPTextEmbeddings(nn.Module): '''Construct the embeddings from word, position and token_type embeddings.''' def __init__(self, config): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: pass
3
1
29
3
23
3
4
0.15
1
3
0
0
2
6
2
12
62
8
47
23
37
7
34
16
31
7
1
2
8
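`ChineseCLIPTextEmbeddings.forward` above sums word, token-type, and absolute position embeddings before LayerNorm and dropout. A minimal stand-alone sketch of that sum with toy dimensions (not the real Chinese-CLIP text configuration):

```python
import torch
from torch import nn

# Sketch of the embedding sum performed by ChineseCLIPTextEmbeddings:
# word + token_type + (absolute) position embeddings, then LayerNorm and dropout.
vocab_size, hidden_size, max_pos, type_vocab = 100, 16, 32, 2
word = nn.Embedding(vocab_size, hidden_size, padding_idx=0)
position = nn.Embedding(max_pos, hidden_size)
token_type = nn.Embedding(type_vocab, hidden_size)
norm = nn.LayerNorm(hidden_size, eps=1e-12)
dropout = nn.Dropout(0.1)

input_ids = torch.tensor([[5, 9, 3, 0]])                      # (batch=1, seq=4), 0 is padding
position_ids = torch.arange(input_ids.shape[1]).unsqueeze(0)  # [[0, 1, 2, 3]]
token_type_ids = torch.zeros_like(input_ids)                  # single segment

embeddings = word(input_ids) + token_type(token_type_ids) + position(position_ids)
embeddings = dropout(norm(embeddings))
print(embeddings.shape)  # torch.Size([1, 4, 16])
```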
1,158
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPTextEncoder
from torch import nn import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int from typing import Any, Callable, Optional, Union class ChineseCLIPTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @can_return_tuple def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions, **kwargs) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
class ChineseCLIPTextEncoder(nn.Module): def __init__(self, config): pass @can_return_tuple def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutput]: pass
4
0
45
4
41
0
9
0
1
8
2
0
2
3
2
12
91
8
83
26
68
0
35
14
32
17
1
3
18
1,159
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPTextIntermediate
from torch import nn from ...activations import ACT2FN import torch class ChineseCLIPTextIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class ChineseCLIPTextIntermediate(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
6
0
2
0
1
3
0
0
2
2
2
12
13
1
12
5
9
0
11
5
8
2
1
1
3
1,160
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPTextLayer
from ...modeling_layers import GradientCheckpointingLayer from typing import Any, Callable, Optional, Union from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer import torch class ChineseCLIPTextLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ChineseCLIPTextAttention(config) self.intermediate = ChineseCLIPTextIntermediate(config) self.output = ChineseCLIPTextOutput(config) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]: self_attention_outputs = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, **kwargs) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class ChineseCLIPTextLayer(GradientCheckpointingLayer): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]: pass def feed_forward_chunk(self, attention_output): pass
4
0
27
2
23
2
4
0.1
1
7
3
0
3
8
3
13
84
9
70
32
57
7
41
23
37
7
1
2
11
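`ChineseCLIPTextLayer` above routes its feed-forward through `apply_chunking_to_forward`, which splits the sequence dimension into chunks to bound peak memory. A sketch of that idea with a hypothetical `chunked_forward` helper (the real utility lives in the library's `pytorch_utils`; sizes are illustrative):

```python
import torch
from torch import nn

# Sketch of chunked feed-forward: split the sequence dimension into chunks of
# chunk_size, apply the feed-forward per chunk, and concatenate the results.
hidden_size, intermediate_size, chunk_size = 16, 64, 2
ffn = nn.Sequential(nn.Linear(hidden_size, intermediate_size), nn.GELU(),
                    nn.Linear(intermediate_size, hidden_size))

def chunked_forward(x: torch.Tensor, chunk_size: int, seq_len_dim: int = 1) -> torch.Tensor:
    if chunk_size == 0:                       # chunking disabled
        return ffn(x)
    chunks = x.split(chunk_size, dim=seq_len_dim)
    return torch.cat([ffn(c) for c in chunks], dim=seq_len_dim)

x = torch.randn(1, 8, hidden_size)            # (batch, seq, hidden)
out = chunked_forward(x, chunk_size)
assert torch.allclose(out, ffn(x), atol=1e-6)  # same result as the unchunked pass
print(out.shape)                               # torch.Size([1, 8, 16])
```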
1,161
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPTextModel
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
import torch
from typing import Any, Callable, Optional, Union
from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig
from ...cache_utils import Cache

@auto_docstring(custom_intro='\n The text model from CHINESE_CLIP without any head or projection on top.\n ')
class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in
    [Attention is all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar,
    Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
    """
    config: ChineseCLIPTextConfig
    _no_split_modules = ['ChineseCLIPTextEmbeddings']

    def __init__(self, config, add_pooling_layer=True):
        """
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        """
        super().__init__(config)
        self.config = config
        self.embeddings = ChineseCLIPTextEmbeddings(config)
        self.encoder = ChineseCLIPTextEncoder(config)
        self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length), device=device) if token_type_ids is None: if hasattr(self.embeddings, 'token_type_ids'): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
@auto_docstring(custom_intro='\n The text model from CHINESE_CLIP without any head or projection on top.\n ')
class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel):
    '''
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in
    [Attention is all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar,
    Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
    '''
    def __init__(self, config, add_pooling_layer=True):
        '''
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        '''
        pass

    def get_input_embeddings(self):
        pass

    def set_input_embeddings(self, value):
        pass

    def _prune_heads(self, heads_to_prune):
        '''
        Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel
        '''
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]:
        pass
9
3
30
3
20
7
5
0.38
1
8
4
0
5
4
5
6
177
25
110
43
83
42
57
27
51
18
2
2
24
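`ChineseCLIPTextModel.forward` above turns the 2D padding mask into a broadcastable additive mask via `get_extended_attention_mask`. A sketch of the resulting behaviour (not the helper's exact code): padded key positions receive a very large negative bias so softmax gives them essentially zero weight.

```python
import torch

# Sketch of converting a (batch, seq) padding mask into an additive
# (batch, 1, 1, seq) attention mask, as used inside the text encoder.
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])       # (batch=1, seq=5), last two are padding
dtype = torch.float32

extended = attention_mask[:, None, None, :].to(dtype)  # (1, 1, 1, 5)
extended = (1.0 - extended) * torch.finfo(dtype).min   # 0 where kept, huge negative where padded
print(extended)

scores = torch.randn(1, 1, 5, 5)                       # (batch, heads, query, key) raw scores
probs = (scores + extended).softmax(dim=-1)
print(probs[0, 0, 0])                                  # padded keys get ~0 attention weight
```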
1,162
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPTextOutput
import torch from torch import nn class ChineseCLIPTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class ChineseCLIPTextOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
1,163
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPTextPooler
import torch from torch import nn class ChineseCLIPTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output
class ChineseCLIPTextPooler(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
5
1
1
0.2
1
2
0
0
2
2
2
12
13
1
10
7
7
2
10
7
7
1
1
0
2
1,164
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPTextSelfAttention
from torch import nn from typing import Any, Callable, Optional, Union from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel import torch class ChineseCLIPTextSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')): raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})') self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.attention_dropout = config.attention_probs_dropout_prob self.scaling = self.attention_head_size ** (-0.5) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.attention_head_size) query_states = self.query(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.key(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.value(hidden_states).view(hidden_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, head_mask=head_mask, **kwargs) attn_output = attn_output.reshape(*input_shape, -1).contiguous() outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs
class ChineseCLIPTextSelfAttention(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]: pass
3
0
43
7
31
6
6
0.19
1
5
0
0
3
11
3
13
132
22
93
44
80
18
72
35
68
13
1
2
17
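`ChineseCLIPTextSelfAttention.forward` above reshapes the projections into `(batch, heads, seq, head_size)` and delegates to an attention kernel. A minimal eager sketch of that multi-head reshape plus scaled dot-product (attention mask, head mask, and dropout omitted; sizes illustrative):

```python
import torch
from torch import nn

# Eager multi-head attention sketch mirroring the reshaping in
# ChineseCLIPTextSelfAttention.forward.
batch, seq, hidden_size, num_heads = 2, 5, 16, 4
head_size = hidden_size // num_heads
scaling = head_size ** -0.5

q_proj = nn.Linear(hidden_size, hidden_size)
k_proj = nn.Linear(hidden_size, hidden_size)
v_proj = nn.Linear(hidden_size, hidden_size)

hidden_states = torch.randn(batch, seq, hidden_size)
hidden_shape = (batch, seq, num_heads, head_size)

# (batch, seq, hidden) -> (batch, heads, seq, head_size)
q = q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
k = k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
v = v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

scores = torch.matmul(q, k.transpose(-1, -2)) * scaling           # (batch, heads, seq, seq)
attn_weights = scores.softmax(dim=-1)
attn_output = torch.matmul(attn_weights, v)                       # (batch, heads, seq, head_size)
attn_output = attn_output.transpose(1, 2).reshape(batch, seq, hidden_size)
print(attn_output.shape)                                          # torch.Size([2, 5, 16])
```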
1,165
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPTextSelfOutput
from torch import nn import torch class ChineseCLIPTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class ChineseCLIPTextSelfOutput(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
0
5
0
1
0
1
2
0
0
2
3
2
12
12
1
11
6
8
0
11
6
8
1
1
0
2
1,166
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPVisionAttention
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel import torch from torch import nn from typing import Any, Callable, Optional, Union class ChineseCLIPVisionAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) * self.scale key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, None, dropout=0.0 if not self.training else self.dropout, scaling=1.0, **kwargs) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.out_proj(attn_output) return (attn_output, attn_weights)
class ChineseCLIPVisionAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
3
2
26
5
19
2
2
0.12
1
5
0
0
3
10
3
13
82
17
58
28
50
7
44
24
40
4
1
1
7
1,167
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPVisionEmbeddings
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig import torch from torch import nn class ChineseCLIPVisionEmbeddings(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size): raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size}).") target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings
class ChineseCLIPVisionEmbeddings(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): pass def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: ''' This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 ''' pass def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor: pass
4
1
26
5
19
3
2
0.16
1
5
1
0
3
9
3
13
81
16
57
27
53
9
43
27
39
3
1
1
6
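`interpolate_pos_encoding` above lets the vision tower run at resolutions other than the pre-training one by bicubically resizing the patch position embeddings on their 2D grid while keeping the [CLS] position unchanged. A self-contained sketch of that resizing with assumed toy dimensions (a 224px/16px pre-training grid interpolated to a 320px input):

```python
import torch
from torch import nn

# Sketch of position-encoding interpolation: lay the pre-trained patch position
# embeddings out on their original grid, resize with bicubic interpolation to
# the new grid, and prepend the untouched [CLS] position embedding.
embed_dim, patch_size = 32, 16
old_grid, new_grid = 224 // patch_size, 320 // patch_size                 # 14, 20
position_embedding = torch.randn(1, old_grid * old_grid + 1, embed_dim)   # [CLS] + patches

class_pos_embed = position_embedding[:, :1]
patch_pos_embed = position_embedding[:, 1:]
patch_pos_embed = patch_pos_embed.reshape(1, old_grid, old_grid, embed_dim).permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
    patch_pos_embed, size=(new_grid, new_grid), mode="bicubic", align_corners=False
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).reshape(1, -1, embed_dim)
new_pos_embed = torch.cat((class_pos_embed, patch_pos_embed), dim=1)
print(new_pos_embed.shape)  # torch.Size([1, 401, 32]) = 1 [CLS] + 20*20 patches
```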
1,168
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPVisionEncoder
from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig from typing import Any, Callable, Optional, Union from torch import nn from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions class ChineseCLIPVisionEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`ChineseCLIPVisionEncoderLayer`]. Args: config: ChineseCLIPConfig """ def __init__(self, config: ChineseCLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @can_return_tuple def forward(self, inputs_embeds, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: """ Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer(hidden_states, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
class ChineseCLIPVisionEncoder(nn.Module): ''' Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`ChineseCLIPVisionEncoderLayer`]. Args: config: ChineseCLIPConfig ''' def __init__(self, config: ChineseCLIPConfig): pass @can_return_tuple def forward(self, inputs_embeds, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: ''' Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' pass
4
2
33
3
22
8
7
0.47
1
8
3
0
2
3
2
12
75
9
45
17
36
21
27
11
24
12
1
2
13
1,169
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPVisionLayer
from typing import Any, Callable, Optional, Union from torch import nn from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig from ...modeling_layers import GradientCheckpointingLayer import torch class ChineseCLIPVisionLayer(GradientCheckpointingLayer): def __init__(self, config: ChineseCLIPConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = ChineseCLIPVisionAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = ChineseCLIPVisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, output_attentions=output_attentions) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
class ChineseCLIPVisionLayer(GradientCheckpointingLayer): def __init__(self, config: ChineseCLIPConfig): pass def forward(self, hidden_states: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. ''' pass
3
1
20
3
14
4
2
0.25
1
6
3
0
2
5
2
12
41
6
28
15
21
7
21
11
18
2
1
1
3
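`ChineseCLIPVisionLayer.forward` above is a pre-LayerNorm residual block: each sub-module sees a normalized input, and its output is added back to the un-normalized residual. A toy sketch of that control flow, with `nn.Identity()` standing in for the attention and MLP sub-modules:

```python
import torch
from torch import nn

# Sketch of the pre-LayerNorm residual pattern used by ChineseCLIPVisionLayer.
embed_dim = 32
layer_norm1 = nn.LayerNorm(embed_dim, eps=1e-5)
layer_norm2 = nn.LayerNorm(embed_dim, eps=1e-5)
self_attn = nn.Identity()   # placeholder for ChineseCLIPVisionAttention
mlp = nn.Identity()         # placeholder for ChineseCLIPVisionMLP

hidden_states = torch.randn(1, 7, embed_dim)    # (batch, 1 + num_patches, embed_dim)

residual = hidden_states
hidden_states = residual + self_attn(layer_norm1(hidden_states))   # attention block
residual = hidden_states
hidden_states = residual + mlp(layer_norm2(hidden_states))          # feed-forward block
print(hidden_states.shape)  # torch.Size([1, 7, 32])
```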
1,170
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPVisionMLP
from ...activations import ACT2FN import torch from torch import nn class ChineseCLIPVisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states
class ChineseCLIPVisionMLP(nn.Module): def __init__(self, config): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
6
0
1
0
1
2
0
0
2
4
2
12
13
1
12
7
9
0
12
7
9
1
1
0
2
1,171
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPVisionModel
import torch from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int from typing import Any, Callable, Optional, Union from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig from torch import nn @auto_docstring(custom_intro='\n The vision model from CHINESE_CLIP without any head or projection on top.\n ') class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel): config: ChineseCLIPVisionConfig main_input_name = 'pixel_values' _no_split_modules = ['ChineseCLIPVisionEmbeddings', 'ChineseCLIPVisionAttention'] def __init__(self, config: ChineseCLIPVisionConfig): super().__init__(config) self.vision_model = ChineseCLIPVisionTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: """ Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
@auto_docstring(custom_intro='\n The vision model from CHINESE_CLIP without any head or projection on top.\n ') class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel): def __init__(self, config: ChineseCLIPVisionConfig): pass def get_input_embeddings(self) -> nn.Module: pass @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: ''' Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```''' pass
6
1
15
2
7
6
1
0.61
1
5
3
0
3
1
3
4
55
10
28
16
15
17
13
8
9
2
2
0
4
1,172
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/modeling_chinese_clip.py
transformers.models.chinese_clip.modeling_chinese_clip.ChineseCLIPVisionTransformer
from torch import nn import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig from typing import Any, Callable, Optional, Union class ChineseCLIPVisionTransformer(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = ChineseCLIPVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = ChineseCLIPVisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @can_return_tuple @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
class ChineseCLIPVisionTransformer(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): pass @can_return_tuple @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: pass
5
0
26
4
21
2
4
0.07
1
7
4
0
2
5
2
12
56
8
45
21
33
3
24
13
21
6
1
1
7
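`ChineseCLIPVisionTransformer.forward` above pools by taking the first (class-token) position of the encoder output and passing it through `post_layernorm`. A short sketch of just that pooling step on a dummy encoder output; the tensor is fabricated for illustration:

```python
import torch
from torch import nn

batch, seq_len, hidden = 2, 50, 64                         # illustrative sizes
last_hidden_state = torch.randn(batch, seq_len, hidden)    # stand-in for the encoder output
post_layernorm = nn.LayerNorm(hidden)

pooled_output = last_hidden_state[:, 0, :]                 # take the class-token position
pooled_output = post_layernorm(pooled_output)
print(pooled_output.shape)                                 # torch.Size([2, 64])
```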
1,173
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/chinese_clip/processing_chinese_clip.py
transformers.models.chinese_clip.processing_chinese_clip.ChineseCLIPProcessor
from ...processing_utils import ProcessorMixin import warnings class ChineseCLIPProcessor(ProcessorMixin): """ Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into a single processor. [`ChineseCLIPProcessor`] offers all the functionalities of [`ChineseCLIPImageProcessor`] and [`BertTokenizerFast`]. See the [`~ChineseCLIPProcessor.__call__`] and [`~ChineseCLIPProcessor.decode`] for more information. Args: image_processor ([`ChineseCLIPImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`BertTokenizerFast`], *optional*): The tokenizer is a required input. """ attributes = ['image_processor', 'tokenizer'] image_processor_class = ('ChineseCLIPImageProcessor', 'ChineseCLIPImageProcessorFast') tokenizer_class = ('BertTokenizer', 'BertTokenizerFast') def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if 'feature_extractor' in kwargs: warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning) feature_extractor = kwargs.pop('feature_extractor') image_processor = image_processor if image_processor is not None else feature_extractor super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor @property def feature_extractor_class(self): warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning) return self.image_processor_class
class ChineseCLIPProcessor(ProcessorMixin): ''' Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into a single processor. [`ChineseCLIPProcessor`] offers all the functionalities of [`ChineseCLIPImageProcessor`] and [`BertTokenizerFast`]. See the [`~ChineseCLIPProcessor.__call__`] and [`~ChineseCLIPProcessor.decode`] for more information. Args: image_processor ([`ChineseCLIPImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`BertTokenizerFast`], *optional*): The tokenizer is a required input. ''' def __init__(self, image_processor=None, tokenizer=None, **kwargs): pass @property def feature_extractor_class(self): pass
4
1
17
2
10
6
3
0.75
1
7
2
0
6
1
6
23
130
18
64
27
48
48
42
18
35
7
2
1
16
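`ChineseCLIPProcessor` above wraps the Chinese-CLIP image processor and the BERT tokenizer in one object. A hedged usage sketch, reusing the checkpoint name and image URL from the `ChineseCLIPVisionModel` docstring above and assuming the standard processor call pattern; the text captions are made up for illustration:

```python
from PIL import Image
import requests
from transformers import ChineseCLIPProcessor

# Checkpoint and image URL follow the example in the ChineseCLIPVisionModel docstring above.
processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
image = Image.open(requests.get(url, stream=True).raw)

# The processor dispatches `images` to the image processor and `text` to the tokenizer.
inputs = processor(text=["皮卡丘", "一只猫"], images=image, padding=True, return_tensors="pt")
print(inputs.keys())  # e.g. input_ids, token_type_ids, attention_mask, pixel_values
```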
1,174
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/configuration_clap.py
transformers.models.clap.configuration_clap.ClapAudioConfig
from ...configuration_utils import PretrainedConfig class ClapAudioConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`ClapAudioModel`]. It is used to instantiate a CLAP audio encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the audio encoder of the CLAP [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: window_size (`int`, *optional*, defaults to 8): Image size of the spectrogram num_mel_bins (`int`, *optional*, defaults to 64): Number of mel features used per frames. Should correspond to the value used in the `ClapProcessor` class. spec_size (`int`, *optional*, defaults to 256): Desired input size of the spectrogram that the model supports. It can be different from the output of the `ClapFeatureExtractor`, in which case the input features will be resized. Corresponds to the `image_size` of the audio models. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. patch_size (`int`, *optional*, defaults to 4): Patch size for the audio spectrogram patch_stride (`list`, *optional*, defaults to `[4, 4]`): Patch stride for the audio spectrogram num_classes (`int`, *optional*, defaults to 527): Number of classes used for the head training hidden_size (`int`, *optional*, defaults to 768): Hidden size of the output of the audio encoder. Correspond to the dimension of the penultimate layer's output,which is sent to the projection MLP layer. projection_dim (`int`, *optional*, defaults to 512): Hidden size of the projection layer. depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`): Depths used for the Swin Layers of the audio model num_attention_heads (`list`, *optional*, defaults to `[4, 8, 16, 32]`): Number of attention heads used for the Swin Layers of the audio model enable_fusion (`bool`, *optional*, defaults to `False`): Whether or not to enable patch fusion. This is the main contribution of the authors, and should give the best results. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the encoder. fusion_type (`[type]`, *optional*): Fusion type used for the patch fusion. patch_embed_input_channels (`int`, *optional*, defaults to 1): Number of channels used for the input spectrogram flatten_patch_embeds (`bool`, *optional*, defaults to `True`): Whether or not to flatten the patch embeddings patch_embeds_hidden_size (`int`, *optional*, defaults to 96): Hidden size of the patch embeddings. It is used as the number of output channels. enable_patch_layer_norm (`bool`, *optional*, defaults to `True`): Whether or not to enable layer normalization for the patch embeddings drop_path_rate (`float`, *optional*, defaults to 0.0): Drop path rate for the patch fusion attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not to add a bias to the query, key, value projections. mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of the mlp hidden dim to embedding dim. 
aff_block_r (`int`, *optional*, defaults to 4): downsize_ratio used in the AudioFF block num_hidden_layers (`int`, *optional*, defaults to 4): Number of hidden layers in the Transformer encoder. projection_hidden_act (`str`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. layer_norm_eps (`[type]`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import ClapAudioConfig, ClapAudioModel >>> # Initializing a ClapAudioConfig with laion/clap-htsat-fused style configuration >>> configuration = ClapAudioConfig() >>> # Initializing a ClapAudioModel (with random weights) from the laion/clap-htsat-fused style configuration >>> model = ClapAudioModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'clap_audio_model' base_config_key = 'audio_config' def __init__(self, window_size=8, num_mel_bins=64, spec_size=256, hidden_act='gelu', patch_size=4, patch_stride=[4, 4], num_classes=527, hidden_size=768, projection_dim=512, depths=[2, 2, 6, 2], num_attention_heads=[4, 8, 16, 32], enable_fusion=False, hidden_dropout_prob=0.1, fusion_type=None, patch_embed_input_channels=1, flatten_patch_embeds=True, patch_embeds_hidden_size=96, enable_patch_layer_norm=True, drop_path_rate=0.0, attention_probs_dropout_prob=0.0, qkv_bias=True, mlp_ratio=4.0, aff_block_r=4, num_hidden_layers=4, projection_hidden_act='relu', layer_norm_eps=1e-05, initializer_factor=1.0, **kwargs): super().__init__(**kwargs) self.window_size = window_size self.num_mel_bins = num_mel_bins self.spec_size = spec_size self.patch_size = patch_size self.patch_stride = patch_stride self.num_classes = num_classes self.hidden_size = hidden_size self.depths = depths self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.window_size = window_size self.enable_fusion = enable_fusion self.fusion_type = fusion_type self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.projection_dim = projection_dim self.flatten_patch_embeds = flatten_patch_embeds self.patch_embeds_hidden_size = patch_embeds_hidden_size self.enable_patch_layer_norm = enable_patch_layer_norm self.drop_path_rate = drop_path_rate self.attention_probs_dropout_prob = attention_probs_dropout_prob self.qkv_bias = qkv_bias self.mlp_ratio = mlp_ratio self.patch_embed_input_channels = patch_embed_input_channels self.aff_block_r = aff_block_r self.layer_norm_eps = layer_norm_eps self.initializer_factor = initializer_factor self.projection_hidden_act = projection_hidden_act
class ClapAudioConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`ClapAudioModel`]. It is used to instantiate a CLAP audio encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the audio encoder of the CLAP [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: window_size (`int`, *optional*, defaults to 8): Image size of the spectrogram num_mel_bins (`int`, *optional*, defaults to 64): Number of mel features used per frames. Should correspond to the value used in the `ClapProcessor` class. spec_size (`int`, *optional*, defaults to 256): Desired input size of the spectrogram that the model supports. It can be different from the output of the `ClapFeatureExtractor`, in which case the input features will be resized. Corresponds to the `image_size` of the audio models. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. patch_size (`int`, *optional*, defaults to 4): Patch size for the audio spectrogram patch_stride (`list`, *optional*, defaults to `[4, 4]`): Patch stride for the audio spectrogram num_classes (`int`, *optional*, defaults to 527): Number of classes used for the head training hidden_size (`int`, *optional*, defaults to 768): Hidden size of the output of the audio encoder. Correspond to the dimension of the penultimate layer's output,which is sent to the projection MLP layer. projection_dim (`int`, *optional*, defaults to 512): Hidden size of the projection layer. depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`): Depths used for the Swin Layers of the audio model num_attention_heads (`list`, *optional*, defaults to `[4, 8, 16, 32]`): Number of attention heads used for the Swin Layers of the audio model enable_fusion (`bool`, *optional*, defaults to `False`): Whether or not to enable patch fusion. This is the main contribution of the authors, and should give the best results. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the encoder. fusion_type (`[type]`, *optional*): Fusion type used for the patch fusion. patch_embed_input_channels (`int`, *optional*, defaults to 1): Number of channels used for the input spectrogram flatten_patch_embeds (`bool`, *optional*, defaults to `True`): Whether or not to flatten the patch embeddings patch_embeds_hidden_size (`int`, *optional*, defaults to 96): Hidden size of the patch embeddings. It is used as the number of output channels. enable_patch_layer_norm (`bool`, *optional*, defaults to `True`): Whether or not to enable layer normalization for the patch embeddings drop_path_rate (`float`, *optional*, defaults to 0.0): Drop path rate for the patch fusion attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. qkv_bias (`bool`, *optional*, defaults to `True`): Whether or not to add a bias to the query, key, value projections. mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of the mlp hidden dim to embedding dim. 
aff_block_r (`int`, *optional*, defaults to 4): downsize_ratio used in the AudioFF block num_hidden_layers (`int`, *optional*, defaults to 4): Number of hidden layers in the Transformer encoder. projection_hidden_act (`str`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. layer_norm_eps (`[type]`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import ClapAudioConfig, ClapAudioModel >>> # Initializing a ClapAudioConfig with laion/clap-htsat-fused style configuration >>> configuration = ClapAudioConfig() >>> # Initializing a ClapAudioModel (with random weights) from the laion/clap-htsat-fused style configuration >>> model = ClapAudioModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, window_size=8, num_mel_bins=64, spec_size=256, hidden_act='gelu', patch_size=4, patch_stride=[4, 4], num_classes=527, hidden_size=768, projection_dim=512, depths=[2, 2, 6, 2], num_attention_heads=[4, 8, 16, 32], enable_fusion=False, hidden_dropout_prob=0.1, fusion_type=None, patch_embed_input_channels=1, flatten_patch_embeds=True, patch_embeds_hidden_size=96, enable_patch_layer_norm=True, drop_path_rate=0.0, attention_probs_dropout_prob=0.0, qkv_bias=True, mlp_ratio=4.0, aff_block_r=4, num_hidden_layers=4, projection_hidden_act='relu', layer_norm_eps=1e-05, initializer_factor=1.0, **kwargs): pass
2
1
60
0
60
0
1
1.25
1
1
0
0
1
27
1
1
151
9
63
61
31
79
33
31
31
1
1
0
1
1,175
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/configuration_clap.py
transformers.models.clap.configuration_clap.ClapConfig
from ...configuration_utils import PretrainedConfig class ClapConfig(PretrainedConfig): """ [`ClapConfig`] is the configuration class to store the configuration of a [`ClapModel`]. It is used to instantiate a CLAP model according to the specified arguments, defining the text model and audio model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLAP [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`ClapTextConfig`]. audio_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`ClapAudioConfig`]. logit_scale_init_value (`float`, *optional*, defaults to 14.29): The initial value of the *logit_scale* parameter. Default is used as per the original CLAP implementation. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and audio projection layers. projection_hidden_act (`str`, *optional*, defaults to `"relu"`): Activation function for the projection layers. initializer_factor (`float`, *optional*, defaults to 1.0): Factor to scale the initialization of the model weights. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import ClapConfig, ClapModel >>> # Initializing a ClapConfig with laion-ai/base style configuration >>> configuration = ClapConfig() >>> # Initializing a ClapModel (with random weights) from the laion-ai/base style configuration >>> model = ClapModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a ClapConfig from a ClapTextConfig and a ClapAudioConfig >>> from transformers import ClapTextConfig, ClapAudioConfig >>> # Initializing a ClapText and ClapAudioConfig configuration >>> config_text = ClapTextConfig() >>> config_audio = ClapAudioConfig() >>> config = ClapConfig.from_text_audio_configs(config_text, config_audio) ```""" model_type = 'clap' sub_configs = {'text_config': ClapTextConfig, 'audio_config': ClapAudioConfig} def __init__(self, text_config=None, audio_config=None, logit_scale_init_value=1 / 0.07, projection_dim=512, projection_hidden_act='relu', initializer_factor=1.0, **kwargs): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info('text_config is None. Initializing the ClapTextConfig with default values.') if audio_config is None: audio_config = {} logger.info('audio_config is None. initializing the ClapAudioConfig with default values.') self.text_config = ClapTextConfig(**text_config) self.audio_config = ClapAudioConfig(**audio_config) self.text_config.projection_dim = projection_dim self.audio_config.projection_dim = projection_dim self.text_config.projection_hidden_act = projection_hidden_act self.audio_config.projection_hidden_act = projection_hidden_act self.projection_dim = projection_dim self.projection_hidden_act = projection_hidden_act self.hidden_size = self.text_config.hidden_size self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = initializer_factor self.num_hidden_layers = self.text_config.num_hidden_layers + len(self.audio_config.depths)
class ClapConfig(PretrainedConfig): ''' [`ClapConfig`] is the configuration class to store the configuration of a [`ClapModel`]. It is used to instantiate a CLAP model according to the specified arguments, defining the text model and audio model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLAP [laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`ClapTextConfig`]. audio_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`ClapAudioConfig`]. logit_scale_init_value (`float`, *optional*, defaults to 14.29): The initial value of the *logit_scale* parameter. Default is used as per the original CLAP implementation. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and audio projection layers. projection_hidden_act (`str`, *optional*, defaults to `"relu"`): Activation function for the projection layers. initializer_factor (`float`, *optional*, defaults to 1.0): Factor to scale the initialization of the model weights. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import ClapConfig, ClapModel >>> # Initializing a ClapConfig with laion-ai/base style configuration >>> configuration = ClapConfig() >>> # Initializing a ClapModel (with random weights) from the laion-ai/base style configuration >>> model = ClapModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a ClapConfig from a ClapTextConfig and a ClapAudioConfig >>> from transformers import ClapTextConfig, ClapAudioConfig >>> # Initializing a ClapText and ClapAudioConfig configuration >>> config_text = ClapTextConfig() >>> config_audio = ClapAudioConfig() >>> config = ClapConfig.from_text_audio_configs(config_text, config_audio) ```''' def __init__(self, text_config=None, audio_config=None, logit_scale_init_value=1 / 0.07, projection_dim=512, projection_hidden_act='relu', initializer_factor=1.0, **kwargs): pass
2
1
23
4
16
3
2
1.26
1
3
2
0
1
8
2
2
100
21
35
23
22
44
25
13
22
3
1
1
4
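Beyond the docstring example, `ClapConfig.__init__` above derives several attributes from its sub-configs: it propagates `projection_dim` and `projection_hidden_act` into both sub-configs, copies `hidden_size` from the text config, and sets `num_hidden_layers` to the text depth plus the number of audio Swin stages. A small sketch of those defaults; the printed values follow from the defaults shown in the configuration classes above:

```python
from transformers import ClapConfig

config = ClapConfig()                             # default text and audio sub-configs
print(round(config.logit_scale_init_value, 2))    # 14.29, i.e. 1 / 0.07
print(config.text_config.num_hidden_layers)       # 12
print(len(config.audio_config.depths))            # 4 Swin stages: [2, 2, 6, 2]
print(config.num_hidden_layers)                   # 12 + 4 = 16
```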
1,176
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/configuration_clap.py
transformers.models.clap.configuration_clap.ClapTextConfig
from ...configuration_utils import PretrainedConfig class ClapTextConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`ClapTextModel`]. It is used to instantiate a CLAP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLAP [calp-hsat-fused](https://huggingface.co/laion/clap-hsat-fused) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the CLAP model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ClapTextModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"relu"`, `"relu"`, `"silu"` and `"relu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`ClapTextModel`]. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658). is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. projection_hidden_act (`str`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. 
projection_dim (`int`, *optional*, defaults to 512) Dimension of the projection head of the `ClapTextModelWithProjection`. Examples: ```python >>> from transformers import ClapTextConfig, ClapTextModel >>> # Initializing a CLAP text configuration >>> configuration = ClapTextConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = ClapTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'clap_text_model' base_config_key = 'text_config' def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_factor=1.0, layer_norm_eps=1e-12, projection_dim=512, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, projection_hidden_act='relu', **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_factor = initializer_factor self.layer_norm_eps = layer_norm_eps self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.projection_hidden_act = projection_hidden_act self.projection_dim = projection_dim
class ClapTextConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`ClapTextModel`]. It is used to instantiate a CLAP model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the CLAP [calp-hsat-fused](https://huggingface.co/laion/clap-hsat-fused) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the CLAP model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ClapTextModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"relu"`, `"relu"`, `"silu"` and `"relu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`ClapTextModel`]. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658). is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. projection_hidden_act (`str`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the projection layer. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. projection_dim (`int`, *optional*, defaults to 512) Dimension of the projection head of the `ClapTextModelWithProjection`. 
Examples: ```python >>> from transformers import ClapTextConfig, ClapTextModel >>> # Initializing a CLAP text configuration >>> configuration = ClapTextConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = ClapTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_factor=1.0, layer_norm_eps=1e-12, projection_dim=512, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, projection_hidden_act='relu', **kwargs): pass
2
1
41
1
40
0
1
1.37
1
1
0
0
1
16
1
1
113
11
43
42
19
59
21
20
19
1
1
0
1
1,177
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/feature_extraction_clap.py
transformers.models.clap.feature_extraction_clap.ClapFeatureExtractor
from ...feature_extraction_utils import BatchFeature import numpy as np from typing import Any, Optional, Union from ...utils import TensorType, logging import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...utils.import_utils import requires from ...feature_extraction_sequence_utils import SequenceFeatureExtractor import copy @requires(backends=('torch',)) class ClapFeatureExtractor(SequenceFeatureExtractor): """ Constructs a CLAP feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the *Short Time Fourier Transform* (STFT) which should match pytorch's `torch.stft` equivalent. Args: feature_size (`int`, *optional*, defaults to 64): The feature dimension of the extracted Mel spectrograms. This corresponds to the number of mel filters (`n_mels`). sampling_rate (`int`, *optional*, defaults to 48000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). This only serves to warn users if the audio fed to the feature extractor does not have the same sampling rate. hop_length (`int`,*optional*, defaults to 480): Length of the overlapping windows for the STFT used to obtain the Mel Spectrogram. The audio will be split in smaller `frames` with a step of `hop_length` between each frame. max_length_s (`int`, *optional*, defaults to 10): The maximum input length of the model in seconds. This is used to pad the audio. fft_window_size (`int`, *optional*, defaults to 1024): Size of the window (in samples) on which the Fourier transform is applied. This controls the frequency resolution of the spectrogram. 400 means that the fourier transform is computed on windows of 400 samples. padding_value (`float`, *optional*, defaults to 0.0): Padding value used to pad the audio. Should correspond to silences. return_attention_mask (`bool`, *optional*, defaults to `False`): Whether or not the model should return the attention masks corresponding to the input. frequency_min (`float`, *optional*, defaults to 0): The lowest frequency of interest. The STFT will not be computed for values below this. frequency_max (`float`, *optional*, defaults to 14000): The highest frequency of interest. The STFT will not be computed for values above this. top_db (`float`, *optional*): The highest decibel value used to convert the mel spectrogram to the log scale. For more details see the `audio_utils.power_to_db` function truncation (`str`, *optional*, defaults to `"fusion"`): Truncation pattern for long audio inputs. Two patterns are available: - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and a downsampled version of the entire mel spectrogram. If `config.fusion` is set to True, shorter audios also need to to return 4 mels, which will just be a copy of the original mel obtained from the padded audio. - `rand_trunc` will select a random crop of the mel spectrogram. padding (`str`, *optional*, defaults to `"repeatpad"`): Padding pattern for shorter audio inputs. Three patterns were originally implemented: - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`. - `repeat`: the audio is repeated and then cut to fit the `max_length` - `pad`: the audio is padded. 
""" model_input_names = ['input_features', 'is_longer'] def __init__(self, feature_size=64, sampling_rate=48000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float=0, frequency_max: float=14000, top_db: Optional[int]=None, truncation: str='fusion', padding: str='repeatpad', **kwargs): super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs) self.top_db = top_db self.truncation = truncation self.padding = padding self.fft_window_size = fft_window_size self.nb_frequency_bins = (fft_window_size >> 1) + 1 self.hop_length = hop_length self.max_length_s = max_length_s self.nb_max_samples = max_length_s * sampling_rate self.sampling_rate = sampling_rate self.frequency_min = frequency_min self.frequency_max = frequency_max self.mel_filters = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale='htk') self.mel_filters_slaney = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney') def to_dict(self) -> dict[str, Any]: """ Serializes this instance to a Python dictionary. Returns: `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, except for the mel filter banks, which do not need to be saved or printed as they are too long. """ output = copy.deepcopy(self.__dict__) output['feature_extractor_type'] = self.__class__.__name__ if 'mel_filters' in output: del output['mel_filters'] if 'mel_filters_slaney' in output: del output['mel_filters_slaney'] return output def _np_extract_fbank_features(self, waveform: np.ndarray, mel_filters: Optional[np.array]=None) -> np.ndarray: """ Compute the log-mel spectrogram of the provided `waveform` using the Hann window. In CLAP, two different filter banks are used depending on the truncation pattern: - `self.mel_filters`: they correspond to the default parameters of `torchaudio` which can be obtained from calling `torchaudio.transforms.MelSpectrogram().mel_scale.fb`. These filters are used when `truncation` is set to `"fusion"`. - `self.mel_filteres_slaney` : they correspond to the default parameters of `librosa` which used `librosa.filters.mel` when computing the mel spectrogram. These filters were only used in the original implementation when the truncation mode is not `"fusion"`. 
""" log_mel_spectrogram = spectrogram(waveform, window_function(self.fft_window_size, 'hann'), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel='dB') return log_mel_spectrogram.T def _random_mel_fusion(self, mel, total_frames, chunk_frames): ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3) if len(ranges[1]) == 0: ranges[1] = [0] if len(ranges[2]) == 0: ranges[2] = [0] idx_front = np.random.choice(ranges[0]) idx_middle = np.random.choice(ranges[1]) idx_back = np.random.choice(ranges[2]) mel_chunk_front = mel[idx_front:idx_front + chunk_frames, :] mel_chunk_middle = mel[idx_middle:idx_middle + chunk_frames, :] mel_chunk_back = mel[idx_back:idx_back + chunk_frames, :] mel = torch.tensor(mel[None, None, :]) mel_shrink = torch.nn.functional.interpolate(mel, size=[chunk_frames, 64], mode='bilinear', align_corners=False) mel_shrink = mel_shrink[0][0].numpy() mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0) return mel_fusion def _get_input_mel(self, waveform: np.ndarray, max_length, truncation, padding) -> np.array: """ Extracts the mel spectrogram and prepares it for the mode based on the `truncation` and `padding` arguments. Four different path are possible: - `truncation="fusion"` and the length of the waveform is greater than the max length: the mel spectrogram will be computed on the entire audio. 3 random crops and a dowsampled version of the full mel spectrogram are then stacked together. They will later be used for `feature_fusion`. - `truncation="rand_trunc"` and the length of the waveform is smaller than the max length: the audio is padded based on `padding`. - `truncation="fusion"` and the length of the waveform is smaller than the max length: the audio is padded based on `padding`, and is repeated `4` times. - `truncation="rand_trunc"` and the length of the waveform is greater than the max length: the mel spectrogram will be computed on a random crop of the waveform. 
""" if waveform.shape[0] > max_length: if truncation == 'rand_trunc': longer = True overflow = len(waveform) - max_length idx = np.random.randint(0, overflow + 1) waveform = waveform[idx:idx + max_length] input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :] elif truncation == 'fusion': mel = self._np_extract_fbank_features(waveform, self.mel_filters) chunk_frames = max_length // self.hop_length + 1 total_frames = mel.shape[0] if chunk_frames == total_frames: input_mel = np.stack([mel, mel, mel, mel], axis=0) longer = False else: input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames) longer = True else: raise NotImplementedError(f'data_truncating {truncation} not implemented') else: longer = False if waveform.shape[0] < max_length: if padding == 'repeat': n_repeat = int(max_length / len(waveform)) waveform = np.tile(waveform, n_repeat + 1)[:max_length] if padding == 'repeatpad': n_repeat = int(max_length / len(waveform)) waveform = np.tile(waveform, n_repeat) waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='constant', constant_values=0) if truncation == 'fusion': input_mel = self._np_extract_fbank_features(waveform, self.mel_filters) input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0) else: input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :] return (input_mel, longer) def __call__(self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], truncation: Optional[str]=None, padding: Optional[str]=None, max_length: Optional[int]=None, sampling_rate: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Args: raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. truncation (`str`, *optional*): Truncation pattern for long audio inputs. Two patterns are available: - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and a downsampled version of the entire mel spectrogram. If `config.fusion` is set to True, shorter audios also need to to return 4 mels, which will just be a copy of the original mel obtained from the padded audio. - `rand_trunc` will select a random crop of the mel spectrogram. padding (`str`, *optional*): Padding pattern for shorter audio inputs. Three patterns were originally implemented: - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`. - `repeat`: the audio is repeated and then cut to fit the `max_length` - `pad`: the audio is padded. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.np.array` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition pipeline. 
""" truncation = truncation if truncation is not None else self.truncation padding = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError(f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning(f'It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. Failing to do so can result in silent errors that might be hard to debug.') is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}') is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], (np.ndarray, tuple, list))) if is_batched: raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech] elif not is_batched and (not isinstance(raw_speech, np.ndarray)): raw_speech = np.asarray(raw_speech, dtype=np.float64) elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): raw_speech = raw_speech.astype(np.float64) if not is_batched: raw_speech = [np.asarray(raw_speech)] padded_inputs = [self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding) for waveform in raw_speech] input_mel = [] is_longer = [] for mel, longer in padded_inputs: input_mel.append(mel) is_longer.append(longer) if truncation == 'fusion' and sum(is_longer) == 0: rand_idx = np.random.randint(0, len(input_mel)) is_longer[rand_idx] = True if isinstance(input_mel[0], list): input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel] is_longer = [[longer] for longer in is_longer] input_features = {'input_features': input_mel, 'is_longer': is_longer} input_features = BatchFeature(input_features) if return_tensors is not None: input_features = input_features.convert_to_tensors(return_tensors) return input_features
@requires(backends=('torch',)) class ClapFeatureExtractor(SequenceFeatureExtractor): ''' Constructs a CLAP feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the *Short Time Fourier Transform* (STFT) which should match pytorch's `torch.stft` equivalent. Args: feature_size (`int`, *optional*, defaults to 64): The feature dimension of the extracted Mel spectrograms. This corresponds to the number of mel filters (`n_mels`). sampling_rate (`int`, *optional*, defaults to 48000): The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). This only serves to warn users if the audio fed to the feature extractor does not have the same sampling rate. hop_length (`int`,*optional*, defaults to 480): Length of the overlapping windows for the STFT used to obtain the Mel Spectrogram. The audio will be split in smaller `frames` with a step of `hop_length` between each frame. max_length_s (`int`, *optional*, defaults to 10): The maximum input length of the model in seconds. This is used to pad the audio. fft_window_size (`int`, *optional*, defaults to 1024): Size of the window (in samples) on which the Fourier transform is applied. This controls the frequency resolution of the spectrogram. 400 means that the fourier transform is computed on windows of 400 samples. padding_value (`float`, *optional*, defaults to 0.0): Padding value used to pad the audio. Should correspond to silences. return_attention_mask (`bool`, *optional*, defaults to `False`): Whether or not the model should return the attention masks corresponding to the input. frequency_min (`float`, *optional*, defaults to 0): The lowest frequency of interest. The STFT will not be computed for values below this. frequency_max (`float`, *optional*, defaults to 14000): The highest frequency of interest. The STFT will not be computed for values above this. top_db (`float`, *optional*): The highest decibel value used to convert the mel spectrogram to the log scale. For more details see the `audio_utils.power_to_db` function truncation (`str`, *optional*, defaults to `"fusion"`): Truncation pattern for long audio inputs. Two patterns are available: - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and a downsampled version of the entire mel spectrogram. If `config.fusion` is set to True, shorter audios also need to to return 4 mels, which will just be a copy of the original mel obtained from the padded audio. - `rand_trunc` will select a random crop of the mel spectrogram. padding (`str`, *optional*, defaults to `"repeatpad"`): Padding pattern for shorter audio inputs. Three patterns were originally implemented: - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`. - `repeat`: the audio is repeated and then cut to fit the `max_length` - `pad`: the audio is padded. ''' def __init__(self, feature_size=64, sampling_rate=48000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float=0, frequency_max: float=14000, top_db: Optional[int]=None, truncation: str='fusion', padding: str='repeatpad', **kwargs): pass def to_dict(self) -> dict[str, Any]: ''' Serializes this instance to a Python dictionary. 
Returns: `dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, except for the mel filter banks, which do not need to be saved or printed as they are too long. ''' pass def _np_extract_fbank_features(self, waveform: np.ndarray, mel_filters: Optional[np.array]=None) -> np.ndarray: ''' Compute the log-mel spectrogram of the provided `waveform` using the Hann window. In CLAP, two different filter banks are used depending on the truncation pattern: - `self.mel_filters`: they correspond to the default parameters of `torchaudio` which can be obtained from calling `torchaudio.transforms.MelSpectrogram().mel_scale.fb`. These filters are used when `truncation` is set to `"fusion"`. - `self.mel_filteres_slaney` : they correspond to the default parameters of `librosa` which used `librosa.filters.mel` when computing the mel spectrogram. These filters were only used in the original implementation when the truncation mode is not `"fusion"`. ''' pass def _random_mel_fusion(self, mel, total_frames, chunk_frames): pass def _get_input_mel(self, waveform: np.ndarray, max_length, truncation, padding) -> np.array: ''' Extracts the mel spectrogram and prepares it for the mode based on the `truncation` and `padding` arguments. Four different path are possible: - `truncation="fusion"` and the length of the waveform is greater than the max length: the mel spectrogram will be computed on the entire audio. 3 random crops and a dowsampled version of the full mel spectrogram are then stacked together. They will later be used for `feature_fusion`. - `truncation="rand_trunc"` and the length of the waveform is smaller than the max length: the audio is padded based on `padding`. - `truncation="fusion"` and the length of the waveform is smaller than the max length: the audio is padded based on `padding`, and is repeated `4` times. - `truncation="rand_trunc"` and the length of the waveform is greater than the max length: the mel spectrogram will be computed on a random crop of the waveform. ''' pass def __call__(self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], truncation: Optional[str]=None, padding: Optional[str]=None, max_length: Optional[int]=None, sampling_rate: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchFeature: ''' Main method to featurize and prepare for the model one or several sequence(s). Args: raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. truncation (`str`, *optional*): Truncation pattern for long audio inputs. Two patterns are available: - `fusion` will use `_random_mel_fusion`, which stacks 3 random crops from the mel spectrogram and a downsampled version of the entire mel spectrogram. If `config.fusion` is set to True, shorter audios also need to to return 4 mels, which will just be a copy of the original mel obtained from the padded audio. - `rand_trunc` will select a random crop of the mel spectrogram. padding (`str`, *optional*): Padding pattern for shorter audio inputs. Three patterns were originally implemented: - `repeatpad`: the audio is repeated, and then padded to fit the `max_length`. - `repeat`: the audio is repeated and then cut to fit the `max_length` - `pad`: the audio is padded. 
return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of lists of python integers. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors and to allow the automatic speech recognition pipeline to work correctly. ''' pass
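The truncation and padding behaviour documented above can be exercised directly. The snippet below is an editorial illustration rather than part of this record: it assumes a transformers install where `ClapFeatureExtractor` is importable from the top level, and the synthetic waveform lengths (3 s and 15 s) are arbitrary choices.

```python
import numpy as np
from transformers import ClapFeatureExtractor

# Default construction follows the documented defaults: 64 mel bins, 48 kHz, 10 s max length.
feature_extractor = ClapFeatureExtractor()

short_audio = np.random.randn(3 * 48_000)   # shorter than max_length_s -> padded per `padding`
long_audio = np.random.randn(15 * 48_000)   # longer than max_length_s -> truncated per `truncation`

short_inputs = feature_extractor(short_audio, sampling_rate=48_000, truncation="rand_trunc", padding="repeatpad", return_tensors="np")
long_inputs = feature_extractor(long_audio, sampling_rate=48_000, truncation="fusion", padding="repeatpad", return_tensors="np")

# "fusion" stacks 4 mel views for the long clip, and `is_longer` flags which clips exceeded the max length.
print(short_inputs["input_features"].shape, short_inputs["is_longer"])
print(long_inputs["input_features"].shape, long_inputs["is_longer"])
```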
8
5
46
4
31
12
5
0.62
1
11
1
0
6
13
6
23
331
31
186
72
155
116
110
48
103
15
3
3
32
1,178
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioAFFBlock
from torch import nn from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig class ClapAudioAFFBlock(nn.Module): """ ATTENTIONAL FEATURE FUSION Block from CLAP, since in CLAP we are always in 2D mode, it is not needed to implement the 1D version. """ def __init__(self, config: ClapAudioConfig): super().__init__() channels = config.patch_embeds_hidden_size downsize_ratio = config.aff_block_r inter_channels = int(channels // downsize_ratio) self.local_att = nn.Sequential(nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(inter_channels), nn.ReLU(inplace=True), nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(channels)) self.global_att = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(inter_channels), nn.ReLU(inplace=True), nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(channels)) self.sigmoid = nn.Sigmoid() def forward(self, hidden_states, residual): attention_input = hidden_states + residual fused_layer_output = self.local_att(attention_input) + self.global_att(attention_input) fused_layer_output = self.sigmoid(fused_layer_output) output = 2 * hidden_states * fused_layer_output + 2 * residual * (1 - fused_layer_output) return output
class ClapAudioAFFBlock(nn.Module): ''' ATTENTIONAL FEATURE FUSION Block from CLAP, since in CLAP we are always in 2D mode, it is not needed to implement the 1D version. ''' def __init__(self, config: ClapAudioConfig): pass def forward(self, hidden_states, residual): pass
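A minimal sketch of the gating arithmetic in the `forward` above, kept separate from the record itself: the two Conv/BatchNorm branches are replaced by a toy tensor of matching shape (an assumption made purely for illustration), so only the sigmoid mixing of `hidden_states` and `residual` is shown.

```python
import torch

torch.manual_seed(0)
hidden_states = torch.randn(1, 8, 4, 4)   # (batch, channels, height, width)
residual = torch.randn(1, 8, 4, 4)

# Stand-in for local_att(x) + global_att(x): any tensor of the same shape works for this demo.
attention_input = hidden_states + residual
toy_attention_logits = attention_input.mean(dim=(2, 3), keepdim=True).expand_as(attention_input)

gate = torch.sigmoid(toy_attention_logits)
# Gate near 1 favours hidden_states, gate near 0 favours the residual; the factor 2 preserves scale at gate=0.5.
output = 2 * hidden_states * gate + 2 * residual * (1 - gate)
print(output.shape)
```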
3
1
16
2
14
0
1
0.14
1
3
1
0
2
3
2
12
38
6
28
12
25
4
15
12
12
1
1
0
2
1,179
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioAttention
from typing import Any, Callable, Optional, Union from torch import nn import torch.nn.functional as F import torch from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer class ClapAudioAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() self.self = ClapAudioSelfAttention(config, dim, num_heads, window_size) self.output = ClapAudioSelfOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs
class ClapAudioAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): pass def prune_heads(self, heads): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: pass
4
0
11
1
10
1
1
0.1
1
6
2
0
3
3
3
13
36
4
30
17
20
3
22
11
18
2
1
1
4
1,180
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioEncoder
import torch.nn.functional as F import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from typing import Any, Callable, Optional, Union from torch import nn class ClapAudioEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_layers = len(config.depths) self.config = config self.patch_embed = ClapAudioPatchEmbed(config) self.enable_fusion = config.enable_fusion self.patch_stride = self.patch_embed.patch_stride self.spec_size = config.spec_size self.freq_ratio = config.spec_size // config.num_mel_bins self.num_features = int(config.patch_embeds_hidden_size * 2 ** (self.num_layers - 1)) drop_path_rate = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu')] grid_size = self.patch_embed.grid_size self.input_resolutions = [(grid_size[0] // 2 ** i, grid_size[1] // 2 ** i) for i in range(self.num_layers)] self.layers = nn.ModuleList([ClapAudioStage(config=config, dim=int(config.patch_embeds_hidden_size * 2 ** i_layer), input_resolution=self.input_resolutions[i_layer], depth=config.depths[i_layer], num_heads=config.num_attention_heads[i_layer], drop_path=drop_path_rate[sum(config.depths[:i_layer]):sum(config.depths[:i_layer + 1])], downsample=ClapAudioPatchMerging if i_layer < self.num_layers - 1 else None) for i_layer in range(self.num_layers)]) self.gradient_checkpointing = False self.batch_norm = nn.BatchNorm2d(config.num_mel_bins) self.norm = nn.LayerNorm(self.num_features) self.depths = config.depths self.avgpool = nn.AdaptiveAvgPool1d(1) def reshape_mel2img(self, normalized_input_features): """ The input is 4 normalized log mel spectrograms. It is reshape to the common shape of images. Each channel should represent 1 of the 4 crops of the spectrogram. For more details, refer to the [`ClapFeatureExtractor`]. 
""" _, _, time_length, freq_length = normalized_input_features.shape spec_width = int(self.spec_size * self.freq_ratio) spec_height = self.spec_size // self.freq_ratio if time_length > spec_width or freq_length > spec_height: raise ValueError('the wav size should be less than or equal to the swin input size') if time_length < spec_width: normalized_input_features = nn.functional.interpolate(normalized_input_features, (spec_width, freq_length), mode='bicubic', align_corners=True) if freq_length < spec_height: normalized_input_features = nn.functional.interpolate(normalized_input_features, (time_length, spec_height), mode='bicubic', align_corners=True) batch, channels, time, freq = normalized_input_features.shape normalized_input_features = normalized_input_features.reshape(batch, channels * self.freq_ratio, time // self.freq_ratio, freq) normalized_input_features = normalized_input_features.permute(0, 1, 3, 2).contiguous() normalized_input_features = normalized_input_features.reshape(batch, channels, freq * self.freq_ratio, time // self.freq_ratio) return normalized_input_features def forward(self, input_features, is_longer: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, always_partition: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, ClapAudioModelOutput]: input_features = input_features.transpose(1, 3) normalized_input_features = self.batch_norm(input_features) normalized_input_features = normalized_input_features.transpose(1, 3) is_longer_list_idx = None if self.enable_fusion: is_longer_list = is_longer.to(input_features.device) is_longer_list_idx = torch.where(is_longer_list == 1)[0] hidden_states = self.reshape_mel2img(normalized_input_features) frames_num = hidden_states.shape[2] hidden_states = self.patch_embed(hidden_states, is_longer_list_idx) all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None input_dimensions = self.input_resolutions[0] if output_hidden_states: batch_size, _, hidden_size = hidden_states.shape reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, layer_module in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None input_dimensions = self.input_resolutions[i] layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] output_dimensions = layer_outputs[2] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) if output_hidden_states and output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states_before_downsampling.shape reshaped_hidden_state = hidden_states_before_downsampling.view(batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and (not output_hidden_states_before_downsampling): batch_size, _, hidden_size = 
hidden_states.shape reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[3:] last_hidden_state = self.norm(hidden_states) batch_size, _, n_channels = last_hidden_state.shape freq_shape = frames_num // 2 ** (len(self.depths) - 1) // self.patch_stride[0] temporal_shape = frames_num // 2 ** (len(self.depths) - 1) // self.patch_stride[1] last_hidden_state = last_hidden_state.permute(0, 2, 1).contiguous().reshape(batch_size, n_channels, freq_shape, temporal_shape) batch_size, n_channels, n_frequencies, n_temp = last_hidden_state.shape c_freq_bin = n_frequencies // self.freq_ratio last_hidden_state = last_hidden_state.reshape(batch_size, n_channels, n_frequencies // c_freq_bin, c_freq_bin, n_temp) last_hidden_state = last_hidden_state.permute(0, 1, 3, 2, 4).contiguous().reshape(batch_size, n_channels, c_freq_bin, -1) latent_output = self.avgpool(torch.flatten(last_hidden_state, 2)) latent_output = torch.flatten(latent_output, 1) if not return_dict: return tuple((v for v in [last_hidden_state, latent_output, all_reshaped_hidden_states, all_self_attentions] if v is not None)) return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=latent_output, hidden_states=all_reshaped_hidden_states, attentions=all_self_attentions)
class ClapAudioEncoder(nn.Module): def __init__(self, config): pass def reshape_mel2img(self, normalized_input_features): ''' The input is 4 normalized log mel spectrograms. It is reshape to the common shape of images. Each channel should represent 1 of the 4 crops of the spectrogram. For more details, refer to the [`ClapFeatureExtractor`]. ''' pass def forward(self, input_features, is_longer: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, always_partition: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, ClapAudioModelOutput]: pass
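The `reshape_mel2img` step can be traced with plain tensor ops. The sketch below is illustrative only: `freq_ratio=4` and the toy (time, freq) dimensions are assumptions chosen so the shapes divide evenly, mirroring the reshape/permute sequence that folds the time axis into the frequency axis to produce a square, Swin-style image.

```python
import torch

freq_ratio = 4                       # spec_size // num_mel_bins in the real config (assumed value here)
batch, channels, time, freq = 1, 1, 32, 2
mel = torch.arange(batch * channels * time * freq, dtype=torch.float32).reshape(batch, channels, time, freq)

# Same op sequence as reshape_mel2img: fold freq_ratio chunks of the time axis into the frequency axis.
image = mel.reshape(batch, channels * freq_ratio, time // freq_ratio, freq)
image = image.permute(0, 1, 3, 2).contiguous()
image = image.reshape(batch, channels, freq * freq_ratio, time // freq_ratio)

print(mel.shape, "->", image.shape)  # torch.Size([1, 1, 32, 2]) -> torch.Size([1, 1, 8, 8])
```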
4
1
66
12
50
4
6
0.07
1
12
5
0
3
15
3
13
200
37
152
58
138
11
93
48
89
13
1
2
19
1,181
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioIntermediate
from ...activations import ACT2FN import torch import torch.nn.functional as F from torch import nn class ClapAudioIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class ClapAudioIntermediate(nn.Module): def __init__(self, config, dim): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
6
0
6
0
2
0
1
4
0
0
2
2
2
12
13
1
12
5
9
0
11
5
8
2
1
1
3
1,182
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioLayer
from torch import nn import torch.nn.functional as F from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int import torch from typing import Any, Callable, Optional, Union class ClapAudioLayer(nn.Module): def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.shift_size = shift_size self.window_size = config.window_size self.input_resolution = input_resolution self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = ClapAudioAttention(config, dim, num_heads, window_size=self.window_size) self.drop_path = ClapDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = ClapAudioIntermediate(config, dim) self.output = ClapAudioOutput(config, dim) def set_shift_and_window_size(self, input_resolution): if min(input_resolution) <= self.window_size: self.shift_size = torch_int(0) self.window_size = torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution) def get_attn_mask(self, height, width, dtype, device): if self.shift_size > 0: img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device) height_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) width_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) count = 0 for height_slice in height_slices: for width_slice in width_slices: img_mask[:, height_slice, width_slice, :] = count count += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0) else: attn_mask = None return attn_mask def maybe_pad(self, hidden_states, height, width): pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size pad_values = (0, 0, 0, pad_right, 0, pad_bottom) hidden_states = nn.functional.pad(hidden_states, pad_values) return (hidden_states, pad_values) def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]: if not always_partition: self.set_shift_and_window_size(input_dimensions) else: pass height, width = input_dimensions batch_size, _, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) hidden_states = hidden_states.view(batch_size, height, width, channels) hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape if self.shift_size > 0: shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_hidden_states = hidden_states hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype, 
device=hidden_states_windows.device) attention_outputs = self.attention(hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions) attention_output = attention_outputs[0] attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) if self.shift_size > 0: attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: attention_windows = shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :].contiguous() attention_windows = attention_windows.view(batch_size, height * width, channels) hidden_states = shortcut + self.drop_path(attention_windows) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = hidden_states + self.output(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs
class ClapAudioLayer(nn.Module): def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0): pass def set_shift_and_window_size(self, input_resolution): pass def get_attn_mask(self, height, width, dtype, device): pass def maybe_pad(self, hidden_states, height, width): pass def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]: pass
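`ClapAudioLayer` leans on the module-level `window_partition`/`window_reverse` helpers, which are not shown in this record. The sketch below reimplements standard Swin-style partitioning under the assumption that the file's helper behaves the same way, together with the `torch.roll` shift used by the shifted blocks; `window_partition_sketch` and all toy shapes are illustrative, not code from the file.

```python
import torch

def window_partition_sketch(x: torch.Tensor, window_size: int) -> torch.Tensor:
    # (batch, height, width, channels) -> (num_windows * batch, window_size, window_size, channels)
    batch, height, width, channels = x.shape
    x = x.view(batch, height // window_size, window_size, width // window_size, window_size, channels)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, channels)

hidden = torch.randn(2, 8, 8, 16)                           # toy (batch, H, W, C)
shifted = torch.roll(hidden, shifts=(-2, -2), dims=(1, 2))  # shift_size = 2, as in the shifted blocks
windows = window_partition_sketch(shifted, window_size=4)
print(windows.shape)                                        # torch.Size([8, 4, 4, 16]): 4 windows per sample
```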
6
0
24
3
19
1
3
0.06
1
10
4
0
5
10
5
15
123
19
98
49
85
6
73
42
67
6
1
3
16
1,183
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioModel
import torch.nn.functional as F from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig from typing import Any, Callable, Optional, Union from torch import nn class ClapAudioModel(ClapPreTrainedModel): config: ClapAudioConfig main_input_name = 'input_features' def __init__(self, config: ClapAudioConfig): super().__init__(config) self.audio_encoder = ClapAudioEncoder(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.audio_encoder.patch_embed.proj @auto_docstring def forward(self, input_features: Optional[torch.FloatTensor]=None, is_longer: Optional[torch.BoolTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: """ is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*): Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance the features. Examples: ```python >>> from datasets import load_dataset >>> from transformers import AutoProcessor, ClapAudioModel >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> model = ClapAudioModel.from_pretrained("laion/clap-htsat-fused") >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-fused") >>> inputs = processor(audios=audio_sample, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return self.audio_encoder(input_features=input_features, is_longer=is_longer, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
class ClapAudioModel(ClapPreTrainedModel): def __init__(self, config: ClapAudioConfig): pass def get_input_embeddings(self) -> nn.Module: pass @auto_docstring def forward(self, input_features: Optional[torch.FloatTensor]=None, is_longer: Optional[torch.BoolTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: ''' is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*): Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance the features. Examples: ```python >>> from datasets import load_dataset >>> from transformers import AutoProcessor, ClapAudioModel >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> model = ClapAudioModel.from_pretrained("laion/clap-htsat-fused") >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-fused") >>> inputs = processor(audios=audio_sample, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state ```''' pass
5
1
16
2
9
5
2
0.48
1
5
3
0
3
1
3
4
56
10
31
15
18
15
14
7
10
4
2
0
6
1,184
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioModelOutput
import torch.nn.functional as F from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int import torch from typing import Any, Callable, Optional, Union from dataclasses import dataclass @dataclass @auto_docstring(custom_intro='\n ClapAudio model output to mimic the output of the original implementation.\n ') class ClapAudioModelOutput(ModelOutput): """ audio_embeds (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): The Audio embeddings obtained by applying the projection layer to the pooler_output. """ audio_embeds: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass @auto_docstring(custom_intro='\n ClapAudio model output to mimic the output of the original implementation.\n ') class ClapAudioModelOutput(ModelOutput): ''' audio_embeds (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): The Audio embeddings obtained by applying the projection layer to the pooler_output. ''' pass
3
1
0
0
0
0
0
3.4
1
0
0
0
0
0
0
0
26
4
5
5
4
17
5
5
4
0
1
0
0
1,185
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioModelWithProjection
from typing import Any, Callable, Optional, Union from torch import nn import torch.nn.functional as F from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int import torch from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig @auto_docstring class ClapAudioModelWithProjection(ClapPreTrainedModel): config: ClapAudioConfig main_input_name = 'input_features' def __init__(self, config: ClapAudioConfig): super().__init__(config) self.audio_model = ClapAudioModel(config) self.audio_projection = ClapProjectionLayer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.audio_model.audio_encoder.patch_embed.proj @can_return_tuple @auto_docstring def forward(self, input_features: Optional[torch.FloatTensor]=None, is_longer: Optional[torch.BoolTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ClapAudioModelOutput]: """ is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*): Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance the features. Examples: ```python >>> from datasets import load_dataset >>> from transformers import ClapAudioModelWithProjection, ClapProcessor >>> model = ClapAudioModelWithProjection.from_pretrained("laion/clap-htsat-fused") >>> processor = ClapProcessor.from_pretrained("laion/clap-htsat-fused") >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> inputs = processor(audios=audio_sample, return_tensors="pt") >>> outputs = model(**inputs) >>> audio_embeds = outputs.audio_embeds ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states audio_outputs = self.audio_model(input_features=input_features, is_longer=is_longer, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True) pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output audio_embeds = self.audio_projection(pooled_output) return ClapAudioModelOutput(audio_embeds=audio_embeds, last_hidden_state=audio_outputs.last_hidden_state, attentions=audio_outputs.attentions, hidden_states=audio_outputs.hidden_states)
@auto_docstring class ClapAudioModelWithProjection(ClapPreTrainedModel): def __init__(self, config: ClapAudioConfig): pass def get_input_embeddings(self) -> nn.Module: pass @can_return_tuple @auto_docstring def forward(self, input_features: Optional[torch.FloatTensor]=None, is_longer: Optional[torch.BoolTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ClapAudioModelOutput]: ''' is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*): Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance the features. Examples: ```python >>> from datasets import load_dataset >>> from transformers import ClapAudioModelWithProjection, ClapProcessor >>> model = ClapAudioModelWithProjection.from_pretrained("laion/clap-htsat-fused") >>> processor = ClapProcessor.from_pretrained("laion/clap-htsat-fused") >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> inputs = processor(audios=audio_sample, return_tensors="pt") >>> outputs = model(**inputs) >>> audio_embeds = outputs.audio_embeds ```''' pass
7
1
21
3
13
5
3
0.35
1
7
4
0
3
2
3
4
71
13
43
20
30
15
21
12
17
6
2
1
8
1,186
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioOutput
import torch import torch.nn.functional as F from torch import nn class ClapAudioOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
class ClapAudioOutput(nn.Module): def __init__(self, config, dim): pass def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pass
3
0
4
0
4
0
1
0
1
3
0
0
2
2
2
12
10
1
9
5
6
0
9
5
6
1
1
0
2
1,187
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioPatchEmbed
import torch import torch.nn.functional as F from torch import nn from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig class ClapAudioPatchEmbed(nn.Module): """ This module converts the hidden states reshaped as an image to patch embeddings ready to be passed to the Transformer block. """ def __init__(self, config: ClapAudioConfig): super().__init__() img_size = (config.spec_size, config.spec_size) if isinstance(config.spec_size, int) else config.spec_size patch_size = (config.patch_size, config.patch_size) if isinstance(config.patch_size, int) else config.patch_size patch_stride = (config.patch_stride, config.patch_stride) if isinstance(config.patch_stride, int) else config.patch_stride self.img_size = img_size self.patch_stride = patch_stride self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.flatten = config.flatten_patch_embeds self.enable_fusion = config.enable_fusion padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2) scale_factor = 4 if self.enable_fusion and config.fusion_type == 'channel_map' else 1 self.proj = nn.Conv2d(config.patch_embed_input_channels * scale_factor, config.patch_embeds_hidden_size, kernel_size=patch_size, stride=patch_stride, padding=padding) self.norm = nn.LayerNorm(config.patch_embeds_hidden_size) if config.enable_patch_layer_norm else nn.Identity() if self.enable_fusion: self.fusion_model = ClapAudioAFFBlock(config) self.mel_conv2d = nn.Conv2d(config.patch_embed_input_channels, config.patch_embeds_hidden_size, kernel_size=(patch_size[0], patch_size[1] * 3), stride=(patch_stride[0], patch_stride[1] * 3), padding=padding) def forward(self, hidden_states, is_longer_idx=None): if self.enable_fusion: global_hidden_states = hidden_states[:, 0:1, :, :] batch_size, num_channels, height, width = global_hidden_states.shape if height != self.img_size[0] or width != self.img_size[1]: raise ValueError(f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") global_hidden_states = self.proj(global_hidden_states) output_width = global_hidden_states.size(-1) if len(is_longer_idx) > 0: local_hidden_states = hidden_states[is_longer_idx, 1:, :, :].contiguous() batch_size, num_channels, height, width = local_hidden_states.shape local_hidden_states = local_hidden_states.view(batch_size * num_channels, 1, height, width) local_hidden_states = self.mel_conv2d(local_hidden_states) _, features, height, width = local_hidden_states.shape local_hidden_states = local_hidden_states.view(batch_size, num_channels, features, height, width) local_hidden_states = local_hidden_states.permute((0, 2, 3, 1, 4)).contiguous().flatten(3) local_width = local_hidden_states.size(-1) local_hidden_states = torch.nn.functional.pad(local_hidden_states, (0, output_width - local_width), 'constant', 0) global_hidden_states[is_longer_idx] = self.fusion_model(global_hidden_states[is_longer_idx], local_hidden_states) hidden_states = global_hidden_states else: _, _, height, width = hidden_states.shape if height != self.img_size[0] or width != self.img_size[1]: raise ValueError(f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") hidden_states = self.proj(hidden_states) if self.flatten: hidden_states = hidden_states.flatten(2).transpose(1, 2) hidden_states = self.norm(hidden_states) return hidden_states
class ClapAudioPatchEmbed(nn.Module): ''' This module converts the hidden states reshaped as an image to patch embeddings ready to be passed to the Transformer block. ''' def __init__(self, config: ClapAudioConfig): pass def forward(self, hidden_states, is_longer_idx=None): pass
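To make the patch-embedding arithmetic in `__init__` concrete, here is a small sketch with assumed values (`spec_size=256`, `patch_size=4`, `patch_stride=4`, 96 embedding channels); these are illustrative numbers, not read from a `ClapAudioConfig`.

```python
import torch
from torch import nn

spec_size, patch_size, patch_stride = 256, (4, 4), (4, 4)
grid_size = (spec_size // patch_stride[0], spec_size // patch_stride[1])
num_patches = grid_size[0] * grid_size[1]
padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2)
print(grid_size, num_patches, padding)            # (64, 64) 4096 (0, 0)

# The projection is a strided Conv2d followed by flattening the spatial grid into a token sequence.
proj = nn.Conv2d(1, 96, kernel_size=patch_size, stride=patch_stride, padding=padding)
image_like = torch.randn(1, 1, spec_size, spec_size)
patch_tokens = proj(image_like).flatten(2).transpose(1, 2)
print(patch_tokens.shape)                         # torch.Size([1, 4096, 96])
```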
3
1
45
8
36
2
7
0.1
1
5
2
0
2
10
2
12
96
17
72
24
69
7
47
24
44
7
1
2
13
1,188
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioPatchMerging
import torch import torch.nn.functional as F from torch import nn class ClapAudioPatchMerging(nn.Module): """ Patch Merging Layer. Args: input_resolution (`tuple[int]`): Resolution of input feature. dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. """ def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None: super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def maybe_pad(self, input_feature, height, width): should_pad = height % 2 == 1 or width % 2 == 1 if should_pad: pad_values = (0, 0, 0, width % 2, 0, height % 2) input_feature = nn.functional.pad(input_feature, pad_values) return input_feature def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor: height, width = input_dimensions batch_size, dim, num_channels = input_feature.shape input_feature = input_feature.view(batch_size, height, width, num_channels) input_feature = self.maybe_pad(input_feature, height, width) input_feature_0 = input_feature[:, 0::2, 0::2, :] input_feature_1 = input_feature[:, 1::2, 0::2, :] input_feature_2 = input_feature[:, 0::2, 1::2, :] input_feature_3 = input_feature[:, 1::2, 1::2, :] input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = input_feature.view(batch_size, -1, 4 * num_channels) input_feature = self.norm(input_feature) input_feature = self.reduction(input_feature) return input_feature
class ClapAudioPatchMerging(nn.Module): ''' Patch Merging Layer. Args: input_resolution (`tuple[int]`): Resolution of input feature. dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. ''' def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None: pass def maybe_pad(self, input_feature, height, width): pass def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor: pass
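The 2x2 merge in `forward` can be reproduced on a toy tensor; all shapes below are illustrative. Each output token concatenates its four spatial neighbours, quartering the number of tokens, while the linear reduction maps 4*dim channels down to 2*dim.

```python
import torch
from torch import nn

batch, height, width, channels = 1, 4, 4, 8
tokens = torch.randn(batch, height * width, channels)            # sequence-shaped input, as in forward

x = tokens.view(batch, height, width, channels)
merged = torch.cat([x[:, 0::2, 0::2, :], x[:, 1::2, 0::2, :], x[:, 0::2, 1::2, :], x[:, 1::2, 1::2, :]], dim=-1)
merged = merged.view(batch, -1, 4 * channels)                    # (1, 4, 32): 4x fewer tokens, 4x more channels

norm = nn.LayerNorm(4 * channels)
reduction = nn.Linear(4 * channels, 2 * channels, bias=False)
print(reduction(norm(merged)).shape)                             # torch.Size([1, 4, 16])
```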
4
1
12
1
9
3
1
0.67
1
3
0
0
3
4
3
13
52
8
27
16
23
18
27
16
23
2
1
1
4
1,189
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioSelfAttention
import torch.nn.functional as F import math import torch import collections from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer from typing import Any, Callable, Optional, Union from torch import nn class ClapAudioSelfAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() if dim % num_heads != 0: raise ValueError(f'The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})') self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.window_size = window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)) coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(meshgrid([coords_h, coords_w], indexing='ij')) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) self.register_buffer('relative_position_index', relative_position_index) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: batch_size, dim, num_channels = hidden_states.shape hidden_shape = (batch_size, dim, -1, self.attention_head_size) query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2) key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2) value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)] relative_position_bias = relative_position_bias.view(self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() attention_scores = attention_scores + relative_position_bias.unsqueeze(0) if attention_mask is not None: mask_shape = attention_mask.shape[0] attention_scores = attention_scores.view(batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim) attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) attention_probs = nn.functional.softmax(attention_scores, dim=-1) attention_probs = self.dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = 
context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs
class ClapAudioSelfAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]: pass
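The relative-position bookkeeping in `__init__` is easiest to see on a tiny window. The sketch below repeats the same index construction for an assumed 2x2 window (the model uses the configured window size); the resulting table indexes a learned bias of size (2*Wh-1)*(2*Ww-1) per head.

```python
import torch

window_size = (2, 2)                                            # toy window, for illustration only

coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing="ij"))   # (2, Wh, Ww)
coords_flatten = torch.flatten(coords, 1)                                   # (2, Wh*Ww)

relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]   # pairwise (dy, dx) offsets
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += window_size[0] - 1                  # shift offsets so they start at 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1               # row-major flattening of the 2D offset
relative_position_index = relative_coords.sum(-1)

print(relative_position_index)   # (4, 4) indices into a bias table of size (2*2-1)*(2*2-1) = 9
```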
3
0
32
6
24
2
3
0.1
1
5
0
0
3
9
3
13
98
19
72
38
62
7
56
32
52
4
1
1
8
1,190
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioSelfOutput
from torch import nn import torch import torch.nn.functional as F class ClapAudioSelfOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
class ClapAudioSelfOutput(nn.Module): def __init__(self, config, dim): pass def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: pass
3
0
5
1
4
0
1
0
1
2
0
0
2
2
2
12
11
2
9
5
6
0
9
5
6
1
1
0
2
1,191
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapAudioStage
from typing import Any, Callable, Optional, Union from torch import nn import torch.nn.functional as F import torch from ...modeling_layers import GradientCheckpointingLayer class ClapAudioStage(GradientCheckpointingLayer): def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): super().__init__() self.config = config self.dim = dim self.blocks = nn.ModuleList([ClapAudioLayer(config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, drop_path_rate=drop_path[i], shift_size=0 if i % 2 == 0 else config.window_size // 2) for i in range(depth)]) if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> tuple[torch.Tensor]: height, width = input_dimensions for i, layer_module in enumerate(self.blocks): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states if self.downsample is not None: height_downsampled, width_downsampled = ((height + 1) // 2, (width + 1) // 2) output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs
class ClapAudioStage(GradientCheckpointingLayer): def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): pass def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> tuple[torch.Tensor]: pass
3
0
28
4
24
1
4
0.02
1
7
1
0
2
5
2
12
58
8
49
23
39
1
26
16
23
5
1
1
8
1,192
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapDropPath
import torch import torch.nn.functional as F from torch import nn class ClapDropPath(nn.Module): """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is a slightly refactored version of the `SwinDropPath` implementation. """ def __init__(self, drop_prob=None): super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states): if self.drop_prob == 0.0 or not self.training: return hidden_states keep_prob = 1 - self.drop_prob shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device) random_tensor.floor_() output = hidden_states.div(keep_prob) * random_tensor return output
class ClapDropPath(nn.Module): ''' Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is a slightly refactored version of the `SwinDropPath` implementation. ''' def __init__(self, drop_prob=None): pass def forward(self, hidden_states): pass
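A short numeric sketch of the per-sample stochastic-depth mask computed in `forward`; `drop_prob=0.5` and the toy shapes are assumptions for illustration. Whole samples either keep their residual branch (rescaled by 1/keep_prob) or drop it entirely.

```python
import torch

torch.manual_seed(0)
drop_prob = 0.5
hidden_states = torch.ones(4, 3, 8)                                  # (batch, tokens, channels)

keep_prob = 1 - drop_prob
shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)  # one mask entry per sample
random_tensor = keep_prob + torch.rand(shape)
random_tensor.floor_()                                               # binarize: 0 drops the whole path

output = hidden_states.div(keep_prob) * random_tensor
print(random_tensor.flatten(), output[:, 0, 0])                      # survivors are scaled by 1/keep_prob
```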
3
1
8
1
6
1
2
0.46
1
1
0
0
2
1
2
12
22
4
13
8
10
6
13
8
10
2
1
1
3
1,193
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapModel
import torch.nn.functional as F import math from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig from typing import Any, Callable, Optional, Union from torch import nn @auto_docstring class ClapModel(ClapPreTrainedModel): config: ClapConfig def __init__(self, config: ClapConfig): super().__init__(config) if not isinstance(config.text_config, ClapTextConfig): raise TypeError(f'config.text_config is expected to be of type ClapTextConfig but is of type {type(config.text_config)}.') if not isinstance(config.audio_config, ClapAudioConfig): raise TypeError(f'config.audio_config is expected to be of type ClapAudioConfig but is of type {type(config.audio_config)}.') text_config = config.text_config audio_config = config.audio_config self.logit_scale_a = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value))) self.logit_scale_t = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value))) self.projection_dim = config.projection_dim self.text_model = ClapTextModel(text_config) self.text_projection = ClapProjectionLayer(text_config) self.audio_model = ClapAudioModel(audio_config) self.audio_projection = ClapProjectionLayer(audio_config) self.post_init() @filter_out_non_signature_kwargs() @auto_docstring def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor: """ Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`ClapTextModel`]. Examples: ```python >>> import torch >>> from transformers import AutoTokenizer, ClapModel >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused") >>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused") >>> inputs = tokenizer(["the sound of a cat", "the sound of a dog"], padding=True, return_tensors="pt") >>> with torch.inference_mode(): ... text_features = model.get_text_features(**inputs) ```""" text_outputs: BaseModelOutputWithPooling = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids) text_features = self.text_projection(text_outputs.pooler_output) text_features = F.normalize(text_features, dim=-1) return text_features @filter_out_non_signature_kwargs() @auto_docstring def get_audio_features(self, input_features: torch.Tensor, is_longer: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None) -> torch.FloatTensor: """ is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*): Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance the features. Returns: audio_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The audio embeddings obtained by applying the projection layer to the pooled output of [`ClapAudioModel`]. 
Examples: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, ClapModel >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused") >>> random_audio = torch.rand((16_000)) >>> inputs = feature_extractor(random_audio, return_tensors="pt") >>> with torch.inference_mode(): ... audio_features = model.get_audio_features(**inputs) ```""" audio_outputs: BaseModelOutputWithPooling = self.audio_model(input_features=input_features, is_longer=is_longer) audio_features = self.audio_projection(audio_outputs.pooler_output) audio_features = F.normalize(audio_features, dim=-1) return audio_features @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, input_features: Optional[torch.FloatTensor]=None, is_longer: Optional[torch.BoolTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ClapOutput]: """ is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*): Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance the features. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. Examples: ```python >>> from datasets import load_dataset >>> from transformers import AutoProcessor, ClapModel >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused") >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-unfused") >>> input_text = ["Sound of a dog", "Sound of vacuum cleaner"] >>> inputs = processor(text=input_text, audios=audio_sample, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_audio = outputs.logits_per_audio # this is the audio-text similarity score >>> probs = logits_per_audio.softmax(dim=-1) # we can take the softmax to get the label probabilities ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict audio_outputs = self.audio_model(input_features=input_features, is_longer=is_longer, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True) text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True) audio_embeds = audio_outputs[1] if not return_dict else audio_outputs.pooler_output audio_embeds = self.audio_projection(audio_embeds) text_embeds = text_outputs[1] if not return_dict else text_outputs.pooler_output text_embeds = self.text_projection(text_embeds) audio_embeds = audio_embeds / audio_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) logit_scale_text = self.logit_scale_t.exp() logit_scale_audio = self.logit_scale_a.exp() logits_per_text = torch.matmul(text_embeds, audio_embeds.t()) * logit_scale_text logits_per_audio = torch.matmul(audio_embeds, 
text_embeds.t()) * logit_scale_audio loss = None if return_loss: caption_loss = contrastive_loss(logits_per_text) audio_loss = contrastive_loss(logits_per_audio.t()) loss = (caption_loss + audio_loss) / 2.0 return ClapOutput(loss=loss, logits_per_audio=logits_per_audio, logits_per_text=logits_per_text, text_embeds=text_embeds, audio_embeds=audio_embeds, text_model_output=text_outputs, audio_model_output=audio_outputs)
@auto_docstring class ClapModel(ClapPreTrainedModel): def __init__(self, config: ClapConfig): pass @filter_out_non_signature_kwargs() @auto_docstring def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor: ''' Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`ClapTextModel`]. Examples: ```python >>> import torch >>> from transformers import AutoTokenizer, ClapModel >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused") >>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused") >>> inputs = tokenizer(["the sound of a cat", "the sound of a dog"], padding=True, return_tensors="pt") >>> with torch.inference_mode(): ... text_features = model.get_text_features(**inputs) ```''' pass @filter_out_non_signature_kwargs() @auto_docstring def get_audio_features(self, input_features: torch.Tensor, is_longer: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None) -> torch.FloatTensor: ''' is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*): Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance the features. Returns: audio_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The audio embeddings obtained by applying the projection layer to the pooled output of [`ClapAudioModel`]. Examples: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, ClapModel >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused") >>> random_audio = torch.rand((16_000)) >>> inputs = feature_extractor(random_audio, return_tensors="pt") >>> with torch.inference_mode(): ... audio_features = model.get_audio_features(**inputs) ```''' pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, input_features: Optional[torch.FloatTensor]=None, is_longer: Optional[torch.BoolTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ClapOutput]: ''' is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*): Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance the features. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. Examples: ```python >>> from datasets import load_dataset >>> from transformers import AutoProcessor, ClapModel >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused") >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-unfused") >>> input_text = ["Sound of a dog", "Sound of vacuum cleaner"] >>> inputs = processor(text=input_text, audios=audio_sample, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_audio = outputs.logits_per_audio # this is the audio-text similarity score >>> probs = logits_per_audio.softmax(dim=-1) # we can take the softmax to get the label probabilities ```''' pass
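The `return_loss` branch calls a module-level `contrastive_loss` helper that is not included in this record; the sketch below assumes the usual CLIP-style definition (cross-entropy against the diagonal) and shows how the symmetric text/audio loss would be combined. Embedding sizes and the logit-scale value are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def contrastive_loss_sketch(logits: torch.Tensor) -> torch.Tensor:
    # Each row should score highest for its matching column (the diagonal pairing).
    return F.cross_entropy(logits, torch.arange(len(logits), device=logits.device))

torch.manual_seed(0)
text_embeds = F.normalize(torch.randn(4, 512), dim=-1)
audio_embeds = F.normalize(torch.randn(4, 512), dim=-1)
logit_scale = torch.tensor(2.6592).exp()                        # exp(log(1/0.07)), an assumed init value

logits_per_text = logit_scale * text_embeds @ audio_embeds.t()
loss = (contrastive_loss_sketch(logits_per_text) + contrastive_loss_sketch(logits_per_text.t())) / 2.0
print(loss)
```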
12
3
54
10
33
12
6
0.35
1
12
7
0
4
7
4
5
225
42
136
63
100
47
61
33
56
9
2
1
22
1,194
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapOutput
import torch.nn.functional as F from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from typing import Any, Callable, Optional, Union from dataclasses import dataclass @dataclass @auto_docstring class ClapOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for audio-text similarity. logits_per_audio (`torch.FloatTensor` of shape `(audio_batch_size, text_batch_size)`): The scaled dot product scores between `audio_embeds` and `text_embeds`. This represents the audio-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, audio_batch_size)`): The scaled dot product scores between `text_embeds` and `audio_embeds`. This represents the text-audio similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`ClapTextModel`]. audio_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The audio embeddings obtained by applying the projection layer to the pooled output of [`ClapAudioModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`ClapTextModel`]. audio_model_output (`BaseModelOutputWithPooling`): The output of the [`ClapAudioModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_audio: Optional[torch.FloatTensor] = None logits_per_text: Optional[torch.FloatTensor] = None text_embeds: Optional[torch.FloatTensor] = None audio_embeds: Optional[torch.FloatTensor] = None text_model_output: BaseModelOutputWithPooling = None audio_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'audio_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))
@dataclass @auto_docstring class ClapOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for audio-text similarity. logits_per_audio (`torch.FloatTensor` of shape `(audio_batch_size, text_batch_size)`): The scaled dot product scores between `audio_embeds` and `text_embeds`. This represents the audio-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, audio_batch_size)`): The scaled dot product scores between `text_embeds` and `audio_embeds`. This represents the text-audio similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`ClapTextModel`]. audio_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The audio embeddings obtained by applying the projection layer to the pooled output of [`ClapAudioModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`ClapTextModel`]. audio_model_output (`BaseModelOutputWithPooling`): The output of the [`ClapAudioModel`]. ''' def to_tuple(self) -> tuple[Any]: pass
4
1
5
0
5
0
2
1.46
1
2
0
0
1
0
1
1
34
2
13
9
11
19
10
9
8
2
1
0
2
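As the field descriptions in `ClapOutput` state, `logits_per_text` is the transpose view of the audio-text score matrix, and `to_tuple` flattens the two nested sub-model outputs instead of returning them as `ModelOutput` objects. The toy construction below illustrates both points; all tensors are fabricated stand-ins for real model outputs.

```python
import torch
from transformers.modeling_outputs import BaseModelOutputWithPooling
from transformers.models.clap.modeling_clap import ClapOutput

# Fabricated score matrix: 2 audio clips x 3 captions.
logits_per_audio = torch.randn(2, 3)
output = ClapOutput(
    logits_per_audio=logits_per_audio,
    logits_per_text=logits_per_audio.t(),  # (text_batch, audio_batch)
    text_model_output=BaseModelOutputWithPooling(last_hidden_state=torch.zeros(3, 4, 8)),
    audio_model_output=BaseModelOutputWithPooling(last_hidden_state=torch.zeros(2, 5, 8)),
)

as_tuple = output.to_tuple()
# The nested outputs come back as plain tuples; everything else is passed through.
print(type(as_tuple[-1]))                          # <class 'tuple'>
print(torch.equal(as_tuple[0], logits_per_audio))  # True
```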
1,195
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapPreTrainedModel
import math from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig from torch import nn from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel @auto_docstring class ClapPreTrainedModel(PreTrainedModel): config: ClapConfig base_model_prefix = 'clap' supports_gradient_checkpointing = False def _init_weights(self, module: nn.Module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, ClapTextEmbeddings): module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02) module.token_type_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, ClapModel): module.logit_scale_a.data.fill_(math.log(self.config.logit_scale_init_value)) module.logit_scale_t.data.fill_(math.log(self.config.logit_scale_init_value)) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, (nn.Conv2d, nn.Linear)): in_proj_std = self.config.hidden_size ** (-0.5) * (2 * self.config.num_hidden_layers) ** (-0.5) * factor nn.init.normal_(module.weight, std=in_proj_std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, ClapAudioSelfAttention): module.relative_position_bias_table.data.zero_()
@auto_docstring class ClapPreTrainedModel(PreTrainedModel): def _init_weights(self, module: nn.Module): '''Initialize the weights''' pass
3
1
21
2
18
1
7
0.23
1
2
2
5
1
0
1
1
31
4
22
7
20
5
18
7
16
7
1
2
7
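The `_init_weights` method above scales linear and convolutional weights by a depth-dependent standard deviation and stores the two logit scales in log space. The snippet below only reproduces that arithmetic with illustrative numbers; the concrete `hidden_size`, `num_hidden_layers`, `initializer_factor`, and `logit_scale_init_value` come from the model's `ClapConfig` in practice.

```python
import math

# Illustrative values only; a real ClapConfig supplies these.
hidden_size = 768
num_hidden_layers = 12
initializer_factor = 1.0
logit_scale_init_value = 14.29  # hypothetical

# Std used for nn.Linear / nn.Conv2d weights in _init_weights above.
in_proj_std = hidden_size ** -0.5 * (2 * num_hidden_layers) ** -0.5 * initializer_factor
print(f"linear/conv weight std: {in_proj_std:.5f}")

# The logit scales are parameters filled with the log of the configured value,
# so exp(logit_scale) recovers the multiplicative temperature at run time.
print(f"initial logit scale parameter: {math.log(logit_scale_init_value):.4f}")
```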
1,196
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapProjectionLayer
from torch import nn from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig from ...activations import ACT2FN from typing import Any, Callable, Optional, Union class ClapProjectionLayer(nn.Module): def __init__(self, config: Union[ClapAudioConfig, ClapTextConfig]): super().__init__() self.config = config hidden_size = config.hidden_size projection_dim = config.projection_dim self.linear1 = nn.Linear(hidden_size, projection_dim) self.activation = ACT2FN[config.projection_hidden_act] self.linear2 = nn.Linear(projection_dim, projection_dim) def forward(self, hidden_states): hidden_states = self.linear1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.linear2(hidden_states) return hidden_states
class ClapProjectionLayer(nn.Module): def __init__(self, config: Union[ClapAudioConfig, ClapTextConfig]): pass def forward(self, hidden_states): pass
3
0
7
1
7
0
1
0
1
3
2
0
2
4
2
12
16
2
14
9
11
0
14
9
11
1
1
0
2
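`ClapProjectionLayer` is the small MLP head that maps a pooled `hidden_size` vector into the shared `projection_dim` space via Linear → activation → Linear. A quick shape check follows, with deliberately tiny, made-up sizes.

```python
import torch
from transformers import ClapTextConfig
from transformers.models.clap.modeling_clap import ClapProjectionLayer

# Toy sizes; real checkpoints use much larger hidden_size / projection_dim.
config = ClapTextConfig(hidden_size=64, projection_dim=32)
layer = ClapProjectionLayer(config)

pooled = torch.randn(4, 64)    # (batch, hidden_size), e.g. a pooled text output
projected = layer(pooled)
print(projected.shape)         # torch.Size([4, 32])
```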
1,197
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapTextAttention
import torch from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer from typing import Any, Callable, Optional, Union from torch import nn import torch.nn.functional as F class ClapTextAttention(nn.Module): def __init__(self, config): super().__init__() self.self = ClapTextSelfAttention(config) self.output = ClapTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads) self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, **kwargs) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] return outputs
class ClapTextAttention(nn.Module): def __init__(self, config): pass def prune_heads(self, heads): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, **kwargs) -> tuple[torch.Tensor]: pass
4
0
15
1
14
1
1
0.07
1
5
1
0
3
3
3
13
49
4
43
20
30
3
22
11
18
2
1
1
4
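`prune_heads` above relies on `find_pruneable_heads_and_indices` to translate a set of head indices into the flat positions that must survive in the query, key, value, and output projections. Below is a small standalone illustration of that helper; the head count and head size are arbitrary.

```python
import torch
from transformers.pytorch_utils import find_pruneable_heads_and_indices

num_attention_heads, attention_head_size = 4, 8
heads_to_prune = {1, 3}

# Returns the (possibly re-mapped) heads to prune and the flat indices to *keep*.
heads, index = find_pruneable_heads_and_indices(
    heads_to_prune, num_attention_heads, attention_head_size, already_pruned_heads=set()
)
print(heads)        # {1, 3}
print(index.shape)  # torch.Size([16]) -> 2 surviving heads x head size 8
```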
1,198
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapTextEmbeddings
from typing import Any, Callable, Optional, Union import torch import torch.nn.functional as F from torch import nn class ClapTextEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute') self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=True) self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=True) self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx) def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: if position_ids is None: if input_ids is not None: position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, self.padding_idx) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape if token_type_ids is None: if hasattr(self, 'token_type_ids'): buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1) buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids) token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length) else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == 'absolute': position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings @staticmethod def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange(padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device) return position_ids.unsqueeze(0).expand(input_shape) @staticmethod def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx
class ClapTextEmbeddings(nn.Module): '''Construct the embeddings from word, position and token_type embeddings.''' def __init__(self, config): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor: pass @staticmethod def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx): ''' We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor ''' pass @staticmethod def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): ''' Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor ''' pass
7
3
26
3
18
5
3
0.32
1
1
0
0
3
7
3
13
87
13
56
23
50
18
43
21
39
8
1
2
10
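The masked-cumsum trick in `create_position_ids_from_input_ids` is easier to see on concrete numbers: padding tokens keep `padding_idx` as their position, while real tokens count upward from `padding_idx + 1`. The worked example below uses made-up token ids and leaves `past_key_values_length` at zero.

```python
import torch

padding_idx = 1
input_ids = torch.tensor([[5, 7, 9, 1, 1]])  # the trailing 1s are padding

mask = input_ids.ne(padding_idx).int()                                 # [[1, 1, 1, 0, 0]]
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask   # [[1, 2, 3, 0, 0]]
position_ids = incremental_indices.long() + padding_idx
print(position_ids)  # tensor([[2, 3, 4, 1, 1]])
```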
1,199
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/clap/modeling_clap.py
transformers.models.clap.modeling_clap.ClapTextEncoder
import torch.nn.functional as F from ...utils import ModelOutput, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions from typing import Any, Callable, Optional, Union from torch import nn class ClapTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ClapTextLayer(config) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @can_return_tuple def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions, **kwargs) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
class ClapTextEncoder(nn.Module): def __init__(self, config): pass @can_return_tuple def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True, **kwargs) -> Union[tuple[torch.Tensor], BaseModelOutput]: pass
4
0
45
4
41
0
9
0
1
8
2
0
2
3
2
12
91
8
83
26
68
0
35
14
32
17
1
3
18
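The encoder's forward loop above appends one entry to `all_hidden_states` before each layer and once more after the last, so `output_hidden_states=True` yields `num_hidden_layers + 1` tensors. A quick check with a tiny, randomly initialized text model (all sizes below are made up):

```python
import torch
from transformers import ClapTextConfig, ClapTextModel

config = ClapTextConfig(
    vocab_size=100, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=2, intermediate_size=64, max_position_embeddings=64,
)
model = ClapTextModel(config).eval()

input_ids = torch.tensor([[10, 11, 12, 13]])
with torch.inference_mode():
    outputs = model(input_ids, output_hidden_states=True)

print(len(outputs.hidden_states))        # 3: embedding output + one per layer
print(outputs.last_hidden_state.shape)   # torch.Size([1, 4, 32])
```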