Column schema (dtype and observed min–max; for string columns the range is over value lengths, and `class_skeleton` is nullable, marked ⌀ in the viewer):

| Column | Dtype | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length, nullable) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
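A minimal sketch of loading and filtering records with the `datasets` library. The dataset id `org/class-metrics-corpus` and the `train` split name are placeholders, not the real identifiers; substitute the actual Hub id of this corpus:

```python
from datasets import load_dataset

# Placeholder dataset id -- substitute the actual Hub id of this corpus.
ds = load_dataset("org/class-metrics-corpus", split="train")

# Each record pairs a class implementation with its skeleton and the static metrics above.
row = ds[0]
print(row["class_name"])
print(row["human_written_code"][:200])

# Example filter: keep small, documented classes.
small_documented = ds.filter(
    lambda r: r["CountLineCode"] is not None
    and r["CountLineCode"] < 100
    and r["total_doc_str"] > 0
)
print(len(small_documented))
```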
---

id: 2300
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraEmbeddings

human_written_code:

```python
from typing import Callable, Optional, Union
import torch
from torch import nn

class ElectraEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
        self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
        self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)

    def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        batch_size, seq_length = input_shape
        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length]
        if token_type_ids is None:
            if hasattr(self, 'token_type_ids'):
                buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
                buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
                token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == 'absolute':
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
```

class_skeleton:

```python
class ElectraEmbeddings(nn.Module):
    '''Construct the embeddings from word, position and token_type embeddings.'''

    def __init__(self, config):
        pass

    def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
        pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=30, AvgCountLineBlank=4, AvgCountLineCode=23, AvgCountLineComment=3, AvgCyclomatic=4, CommentToCodeRatio=0.17, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=6, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=64, CountLineBlank=9, CountLineCode=47, CountLineCodeDecl=23, CountLineCodeExe=37, CountLineComment=8, CountStmt=34, CountStmtDecl=16, CountStmtExe=31, MaxCyclomatic=7, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=8
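In each record, `class_skeleton` preserves the class signature, decorators, and docstrings while method bodies are reduced to `pass`, as in the pair above. A sketch of how such a skeleton could be derived with the standard-library `ast` module (Python ≥ 3.9 for `ast.unparse`); this illustrates the pairing, and is not necessarily the pipeline that produced the dataset:

```python
import ast


def extract_skeleton(source: str, class_name: str) -> str:
    """Keep the class signature and docstrings; replace every method body with `pass`."""
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, ast.ClassDef) and node.name == class_name:
            for fn in node.body:
                if isinstance(fn, (ast.FunctionDef, ast.AsyncFunctionDef)):
                    first = fn.body[0] if fn.body else None
                    has_doc = (isinstance(first, ast.Expr)
                               and isinstance(first.value, ast.Constant)
                               and isinstance(first.value.value, str))
                    # Keep the method docstring (if any), drop the implementation.
                    fn.body = ([first] if has_doc else []) + [ast.Pass()]
            return ast.unparse(node)
    raise ValueError(f"class {class_name!r} not found")
```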
---

id: 2301
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraEncoder

human_written_code:

```python
import torch
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from torch import nn
from ...cache_utils import Cache, EncoderDecoderCache

class ElectraEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ElectraLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        for i, layer_module in enumerate(self.layer):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            hidden_states = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_values, cache_position=cache_position, **kwargs)
        return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None)
```

class_skeleton:

```python
class ElectraEncoder(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        pass
```

Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=45, AvgCountLineBlank=4, AvgCountLineCode=41, AvgCountLineComment=0, AvgCyclomatic=9, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=91, CountLineBlank=8, CountLineCode=83, CountLineCodeDecl=26, CountLineCodeExe=68, CountLineComment=0, CountStmt=35, CountStmtDecl=14, CountStmtExe=32, MaxCyclomatic=17, MaxInheritanceTree=1, MaxNesting=3, SumCyclomatic=18
---

id: 2302
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraForCausalLM

human_written_code:

````python
from ...processing_utils import Unpack
import torch
from ...cache_utils import Cache, EncoderDecoderCache
from torch import nn
from ...generation import GenerationMixin
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs

@auto_docstring(custom_intro='\n ELECTRA Model with a `language modeling` head on top for CLM fine-tuning.\n ')
class ElectraForCausalLM(ElectraPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ['generator_lm_head.weight']

    def __init__(self, config):
        super().__init__(config)
        if not config.is_decoder:
            logger.warning('If you want to use `ElectraForCausalLM` as a standalone, add `is_decoder=True.`')
        self.electra = ElectraModel(config)
        self.generator_predictions = ElectraGeneratorPredictions(config)
        self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
        self.init_weights()

    def get_output_embeddings(self):
        return self.generator_lm_head

    def set_output_embeddings(self, new_embeddings):
        self.generator_lm_head = new_embeddings

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        """
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`

        Example:

        ```python
        >>> from transformers import AutoTokenizer, ElectraForCausalLM, ElectraConfig
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-base-generator")
        >>> config = ElectraConfig.from_pretrained("google/electra-base-generator")
        >>> config.is_decoder = True
        >>> model = ElectraForCausalLM.from_pretrained("google/electra-base-generator", config=config)

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.logits
        ```"""
        if labels is not None:
            use_cache = False
        outputs = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, return_dict=True, **kwargs)
        sequence_output = outputs[0]
        prediction_scores = self.generator_lm_head(self.generator_predictions(sequence_output))
        lm_loss = None
        if labels is not None:
            lm_loss = self.loss_function(prediction_scores, labels, vocab_size=self.config.vocab_size, **kwargs)
        return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
````

class_skeleton:

````python
@auto_docstring(custom_intro='\n ELECTRA Model with a `language modeling` head on top for CLM fine-tuning.\n ')
class ElectraForCausalLM(ElectraPreTrainedModel, GenerationMixin):
    def __init__(self, config):
        pass

    def get_output_embeddings(self):
        pass

    def set_output_embeddings(self, new_embeddings):
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`

        Example:

        ```python
        >>> from transformers import AutoTokenizer, ElectraForCausalLM, ElectraConfig
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-base-generator")
        >>> config = ElectraConfig.from_pretrained("google/electra-base-generator")
        >>> config.is_decoder = True
        >>> model = ElectraForCausalLM.from_pretrained("google/electra-base-generator", config=config)

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> prediction_logits = outputs.logits
        ```'''
        pass
````

Metrics: total_program_units=8, total_doc_str=1, AvgCountLine=25, AvgCountLineBlank=3, AvgCountLineCode=15, AvgCountLineComment=7, AvgCyclomatic=2, CommentToCodeRatio=0.44, CountClassBase=2, CountClassCoupled=7, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=3, CountDeclMethod=5, CountDeclMethodAll=6, CountLine=137, CountLineBlank=22, CountLineCode=80, CountLineCodeDecl=35, CountLineCodeExe=55, CountLineComment=35, CountStmt=33, CountStmtDecl=17, CountStmtExe=27, MaxCyclomatic=6, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=12
---

id: 2303
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraForMaskedLM

human_written_code:

```python
from ...utils.generic import can_return_tuple, check_model_inputs
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from torch import nn
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch

@auto_docstring(custom_intro='\n Electra model with a language modeling head on top.\n\n Even though both the discriminator and generator may be loaded into this model, the generator is the only model of\n the two to have been trained for the masked language modeling task.\n ')
class ElectraForMaskedLM(ElectraPreTrainedModel):
    _tied_weights_keys = ['generator_lm_head.weight']

    def __init__(self, config):
        super().__init__(config)
        self.electra = ElectraModel(config)
        self.generator_predictions = ElectraGeneratorPredictions(config)
        self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
        self.post_init()

    def get_output_embeddings(self):
        return self.generator_lm_head

    def set_output_embeddings(self, word_embeddings):
        self.generator_lm_head = word_embeddings

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        generator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
        generator_sequence_output = generator_hidden_states[0]
        prediction_scores = self.generator_predictions(generator_sequence_output)
        prediction_scores = self.generator_lm_head(prediction_scores)
        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        return MaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_states, attentions=generator_hidden_states.attentions)
```

class_skeleton:

```python
@auto_docstring(custom_intro='\n Electra model with a language modeling head on top.\n\n Even though both the discriminator and generator may be loaded into this model, the generator is the only model of\n the two to have been trained for the masked language modeling task.\n ')
class ElectraForMaskedLM(ElectraPreTrainedModel):
    def __init__(self, config):
        pass

    def get_output_embeddings(self):
        pass

    def set_output_embeddings(self, word_embeddings):
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        '''
        pass
```

Metrics: total_program_units=8, total_doc_str=1, AvgCountLine=17, AvgCountLineBlank=2, AvgCountLineCode=13, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.15, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=3, CountDeclMethod=4, CountDeclMethodAll=5, CountLine=81, CountLineBlank=11, CountLineCode=62, CountLineCodeDecl=28, CountLineCodeExe=36, CountLineComment=9, CountStmt=26, CountStmtDecl=15, CountStmtExe=21, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=8
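A short usage sketch for the masked-LM head above, assuming network access to the public `google/electra-small-generator` checkpoint:

```python
import torch
from transformers import AutoTokenizer, ElectraForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-generator")
model = ElectraForMaskedLM.from_pretrained("google/electra-small-generator")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Predict the token at the masked position.
mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_id = logits[0, mask_index].argmax(dim=-1)
print(tokenizer.decode(predicted_id))
```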
---

id: 2304
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraForMultipleChoice

human_written_code:

```python
import torch
from ...processing_utils import Unpack
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union

@auto_docstring
class ElectraForMultipleChoice(ElectraPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.electra = ElectraModel(config)
        self.sequence_summary = ElectraSequenceSummary(config)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:
            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
        discriminator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
        sequence_output = discriminator_hidden_states[0]
        pooled_output = self.sequence_summary(sequence_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions)
```

class_skeleton:

```python
@auto_docstring
class ElectraForMultipleChoice(ElectraPreTrainedModel):
    def __init__(self, config):
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
        '''
        input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:
            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.
            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        '''
        pass
```

Metrics: total_program_units=6, total_doc_str=1, AvgCountLine=37, AvgCountLineBlank=5, AvgCountLineCode=29, AvgCountLineComment=4, AvgCyclomatic=6, CommentToCodeRatio=0.11, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=82, CountLineBlank=10, CountLineCode=65, CountLineCodeDecl=28, CountLineCodeExe=44, CountLineComment=7, CountStmt=28, CountStmtDecl=15, CountStmtExe=25, MaxCyclomatic=11, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=12
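The forward pass above flattens the choice dimension to `(batch_size * num_choices, seq_len)` before running the encoder, then reshapes the logits back to `(batch_size, num_choices)`. A sketch of how the inputs are shaped, assuming the public `google/electra-small-discriminator` checkpoint (whose multiple-choice head is randomly initialized, so the scores are illustrative only):

```python
import torch
from transformers import AutoTokenizer, ElectraForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
model = ElectraForMultipleChoice.from_pretrained("google/electra-small-discriminator")

prompt = "The dog chased"
choices = ["the ball.", "quantum entanglement."]

# Tokenize each (prompt, choice) pair, then add the num_choices dimension.
enc = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in enc.items()}  # (1, num_choices, seq_len)

with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_choices)
print(logits.softmax(dim=-1))
```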
---

id: 2305
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraForPreTraining

human_written_code:

````python
import torch
from torch import nn
from ...utils.generic import can_return_tuple, check_model_inputs
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...processing_utils import Unpack

@auto_docstring(custom_intro='\n Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.\n\n It is recommended to load the discriminator checkpoint into that model.\n ')
class ElectraForPreTraining(ElectraPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.electra = ElectraModel(config)
        self.discriminator_predictions = ElectraDiscriminatorPredictions(config)
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], ElectraForPreTrainingOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the ELECTRA loss. Input should be a sequence of tokens (see `input_ids` docstring)
            Indices should be in `[0, 1]`:
            - 0 indicates the token is an original token,
            - 1 indicates the token was replaced.

        Examples:

        ```python
        >>> from transformers import ElectraForPreTraining, AutoTokenizer
        >>> import torch

        >>> discriminator = ElectraForPreTraining.from_pretrained("google/electra-base-discriminator")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-base-discriminator")

        >>> sentence = "The quick brown fox jumps over the lazy dog"
        >>> fake_sentence = "The quick brown fox fake over the lazy dog"

        >>> fake_tokens = tokenizer.tokenize(fake_sentence, add_special_tokens=True)
        >>> fake_inputs = tokenizer.encode(fake_sentence, return_tensors="pt")
        >>> discriminator_outputs = discriminator(fake_inputs)
        >>> predictions = torch.round((torch.sign(discriminator_outputs[0]) + 1) / 2)

        >>> fake_tokens
        ['[CLS]', 'the', 'quick', 'brown', 'fox', 'fake', 'over', 'the', 'lazy', 'dog', '[SEP]']

        >>> predictions.squeeze().tolist()
        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        ```"""
        discriminator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
        discriminator_sequence_output = discriminator_hidden_states[0]
        logits = self.discriminator_predictions(discriminator_sequence_output)
        loss = None
        if labels is not None:
            loss_fct = nn.BCEWithLogitsLoss()
            if attention_mask is not None:
                active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1
                active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]
                active_labels = labels[active_loss]
                loss = loss_fct(active_logits, active_labels.float())
            else:
                loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())
        return ElectraForPreTrainingOutput(loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions)
````

class_skeleton:

````python
@auto_docstring(custom_intro='\n Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.\n\n It is recommended to load the discriminator checkpoint into that model.\n ')
class ElectraForPreTraining(ElectraPreTrainedModel):
    def __init__(self, config):
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], ElectraForPreTrainingOutput]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the ELECTRA loss. Input should be a sequence of tokens (see `input_ids` docstring)
            Indices should be in `[0, 1]`:
            - 0 indicates the token is an original token,
            - 1 indicates the token was replaced.

        Examples:

        ```python
        >>> from transformers import ElectraForPreTraining, AutoTokenizer
        >>> import torch

        >>> discriminator = ElectraForPreTraining.from_pretrained("google/electra-base-discriminator")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-base-discriminator")

        >>> sentence = "The quick brown fox jumps over the lazy dog"
        >>> fake_sentence = "The quick brown fox fake over the lazy dog"

        >>> fake_tokens = tokenizer.tokenize(fake_sentence, add_special_tokens=True)
        >>> fake_inputs = tokenizer.encode(fake_sentence, return_tensors="pt")
        >>> discriminator_outputs = discriminator(fake_inputs)
        >>> predictions = torch.round((torch.sign(discriminator_outputs[0]) + 1) / 2)

        >>> fake_tokens
        ['[CLS]', 'the', 'quick', 'brown', 'fox', 'fake', 'over', 'the', 'lazy', 'dog', '[SEP]']

        >>> predictions.squeeze().tolist()
        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        ```'''
        pass
````

Metrics: total_program_units=6, total_doc_str=1, AvgCountLine=46, AvgCountLineBlank=8, AvgCountLineCode=26, AvgCountLineComment=13, AvgCyclomatic=4, CommentToCodeRatio=0.46, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=95, CountLineBlank=16, CountLineCode=54, CountLineCodeDecl=27, CountLineCodeExe=37, CountLineComment=25, CountStmt=24, CountStmtDecl=14, CountStmtExe=21, MaxCyclomatic=6, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=7
---

id: 2306
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput

human_written_code:

```python
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch
from typing import Callable, Optional, Union
from dataclasses import dataclass

@dataclass
@auto_docstring(custom_intro='\n Output type of [`ElectraForPreTraining`].\n ')
class ElectraForPreTrainingOutput(ModelOutput):
    """
    loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
        Total loss of the ELECTRA objective.
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
        Prediction scores of the head (scores for each token before SoftMax).
    """
    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
```

class_skeleton:

```python
@dataclass
@auto_docstring(custom_intro='\n Output type of [`ElectraForPreTraining`].\n ')
class ElectraForPreTrainingOutput(ModelOutput):
    '''
    loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
        Total loss of the ELECTRA objective.
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
        Prediction scores of the head (scores for each token before SoftMax).
    '''
    pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=3.4, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=26, CountLineBlank=4, CountLineCode=5, CountLineCodeDecl=5, CountLineCodeExe=4, CountLineComment=17, CountStmt=5, CountStmtDecl=5, CountStmtExe=4, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
---

id: 2307
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraForQuestionAnswering

human_written_code:

```python
from ...utils.generic import can_return_tuple, check_model_inputs
from typing import Callable, Optional, Union
from .configuration_electra import ElectraConfig
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...processing_utils import Unpack
from torch import nn

@auto_docstring
class ElectraForQuestionAnswering(ElectraPreTrainedModel):
    config_class = ElectraConfig
    base_model_prefix = 'electra'

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.electra = ElectraModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        discriminator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
        sequence_output = discriminator_hidden_states[0]
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2
        return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions)
```

class_skeleton:

```python
@auto_docstring
class ElectraForQuestionAnswering(ElectraPreTrainedModel):
    def __init__(self, config):
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        pass
```

Metrics: total_program_units=6, total_doc_str=0, AvgCountLine=42, AvgCountLineBlank=5, AvgCountLineCode=31, AvgCountLineComment=7, AvgCyclomatic=4, CommentToCodeRatio=0.17, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=99, CountLineBlank=11, CountLineCode=75, CountLineCodeDecl=32, CountLineCodeExe=49, CountLineComment=13, CountStmt=34, CountStmtDecl=18, CountStmtExe=31, MaxCyclomatic=7, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=8
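A sketch of decoding a span from the QA head's start/end logits. It assumes the public `google/electra-small-discriminator` checkpoint; its QA head is randomly initialized, so substitute a checkpoint fine-tuned for extractive QA to get meaningful spans:

```python
import torch
from transformers import AutoTokenizer, ElectraForQuestionAnswering

# Illustrative only: the base discriminator's QA head is untrained.
name = "google/electra-small-discriminator"
tokenizer = AutoTokenizer.from_pretrained(name)
model = ElectraForQuestionAnswering.from_pretrained(name)

question, context = "Who proposed ELECTRA?", "ELECTRA was proposed by Clark et al."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    out = model(**inputs)

# Pick the most likely start/end token positions and decode the span.
start = out.start_logits.argmax(dim=-1).item()
end = out.end_logits.argmax(dim=-1).item()
print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))
```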
---

id: 2308
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraForSequenceClassification

human_written_code:

```python
from ...processing_utils import Unpack
import torch
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging

@auto_docstring(custom_intro='\n ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class ElectraForSequenceClassification(ElectraPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.electra = ElectraModel(config)
        self.classifier = ElectraClassificationHead(config)
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        discriminator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
        sequence_output = discriminator_hidden_states[0]
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions)
```

class_skeleton:

```python
@auto_docstring(custom_intro='\n ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class ElectraForSequenceClassification(ElectraPreTrainedModel):
    def __init__(self, config):
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        '''
        pass
```

Metrics: total_program_units=6, total_doc_str=1, AvgCountLine=39, AvgCountLineBlank=4, AvgCountLineCode=32, AvgCountLineComment=4, AvgCyclomatic=7, CommentToCodeRatio=0.1, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=88, CountLineBlank=8, CountLineCode=73, CountLineCodeDecl=26, CountLineCodeExe=50, CountLineComment=7, CountStmt=33, CountStmtDecl=13, CountStmtExe=30, MaxCyclomatic=12, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=13
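The loss branch above is driven by `config.problem_type`. A minimal sketch of how two of the three modes are selected, using a tiny freshly initialized config (all sizes here are arbitrary illustration values, and the head weights are random):

```python
import torch
from transformers import ElectraConfig, ElectraForSequenceClassification

# Tiny config just to exercise the loss branches.
base = dict(vocab_size=100, embedding_size=32, hidden_size=64, num_hidden_layers=2,
            num_attention_heads=2, intermediate_size=128)
input_ids = torch.randint(0, 100, (2, 8))

# num_labels > 1 with integer labels -> single-label classification (cross-entropy).
model = ElectraForSequenceClassification(ElectraConfig(num_labels=3, **base))
out = model(input_ids=input_ids, labels=torch.tensor([0, 2]))
print(model.config.problem_type)  # 'single_label_classification'

# num_labels == 1 -> regression (mean-squared error).
model = ElectraForSequenceClassification(ElectraConfig(num_labels=1, **base))
out = model(input_ids=input_ids, labels=torch.tensor([[0.5], [1.0]]))
print(model.config.problem_type)  # 'regression'
```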
---

id: 2309
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraForTokenClassification

human_written_code:

```python
from ...utils.generic import can_return_tuple, check_model_inputs
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
import torch
from ...processing_utils import Unpack

@auto_docstring(custom_intro='\n Electra model with a token classification head on top.\n\n Both the discriminator and generator may be loaded into this model.\n ')
class ElectraForTokenClassification(ElectraPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.electra = ElectraModel(config)
        classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        discriminator_hidden_states = self.electra(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
        discriminator_sequence_output = discriminator_hidden_states[0]
        discriminator_sequence_output = self.dropout(discriminator_sequence_output)
        logits = self.classifier(discriminator_sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions)
```

class_skeleton:

```python
@auto_docstring(custom_intro='\n Electra model with a token classification head on top.\n\n Both the discriminator and generator may be loaded into this model.\n ')
class ElectraForTokenClassification(ElectraPreTrainedModel):
    def __init__(self, config):
        pass

    @can_return_tuple
    @auto_docstring
    def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        '''
        pass
```

Metrics: total_program_units=6, total_doc_str=1, AvgCountLine=31, AvgCountLineBlank=3, AvgCountLineCode=26, AvgCountLineComment=3, AvgCyclomatic=4, CommentToCodeRatio=0.08, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=72, CountLineBlank=7, CountLineCode=60, CountLineCodeDecl=27, CountLineCodeExe=37, CountLineComment=5, CountStmt=23, CountStmtDecl=14, CountStmtExe=20, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=7
---

id: 2310
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraGeneratorPredictions

human_written_code:

```python
from torch import nn
from ...activations import ACT2FN, get_activation

class ElectraGeneratorPredictions(nn.Module):
    """Prediction module for the generator, made up of two dense layers."""

    def __init__(self, config):
        super().__init__()
        self.activation = get_activation('gelu')
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.dense = nn.Linear(config.hidden_size, config.embedding_size)

    def forward(self, generator_hidden_states):
        hidden_states = self.dense(generator_hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states
```

class_skeleton:

```python
class ElectraGeneratorPredictions(nn.Module):
    '''Prediction module for the generator, made up of two dense layers.'''

    def __init__(self, config):
        pass

    def forward(self, generator_hidden_states):
        pass
```

Metrics: total_program_units=3, total_doc_str=1, AvgCountLine=6, AvgCountLineBlank=1, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.09, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=16, CountLineBlank=4, CountLineCode=11, CountLineCodeDecl=7, CountLineCodeExe=8, CountLineComment=1, CountStmt=11, CountStmtDecl=7, CountStmtExe=8, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
---

id: 2311
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraIntermediate

human_written_code:

```python
import torch
from torch import nn
from ...activations import ACT2FN, get_activation

class ElectraIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
```

class_skeleton:

```python
class ElectraIntermediate(nn.Module):
    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
```

Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=1, CountLineCode=12, CountLineCodeDecl=5, CountLineCodeExe=9, CountLineComment=0, CountStmt=11, CountStmtDecl=5, CountStmtExe=8, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
---

id: 2312
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
class_name: transformers.models.electra.modeling_electra.ElectraLayer

human_written_code:

```python
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
from ...processing_utils import Unpack
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...cache_utils import Cache, EncoderDecoderCache
import torch

class ElectraLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_idx=None):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ElectraAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f'{self} should be used as a decoder model if cross attention is added')
            self.crossattention = ElectraAttention(config, position_embedding_type='absolute', is_causal=False, layer_idx=layer_idx, is_cross_attention=True)
        self.intermediate = ElectraIntermediate(config)
        self.output = ElectraOutput(config)

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
        self_attention_output, _ = self.attention(hidden_states, attention_mask, head_mask, past_key_value=past_key_value, cache_position=cache_position, **kwargs)
        attention_output = self_attention_output
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, 'crossattention'):
                raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
            cross_attention_output, _ = self.crossattention(self_attention_output, None, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value=past_key_value, **kwargs)
            attention_output = cross_attention_output
        layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
        return layer_output

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
```

class_skeleton:

```python
class ElectraLayer(GradientCheckpointingLayer):
    def __init__(self, config, layer_idx=None):
        pass

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
        pass

    def feed_forward_chunk(self, attention_output):
        pass
```

Metrics: total_program_units=4, total_doc_str=0, AvgCountLine=27, AvgCountLineBlank=2, AvgCountLineCode=23, AvgCountLineComment=2, AvgCyclomatic=4, CommentToCodeRatio=0.1, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=8, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=84, CountLineBlank=9, CountLineCode=70, CountLineCodeDecl=32, CountLineCodeExe=57, CountLineComment=7, CountStmt=41, CountStmtDecl=23, CountStmtExe=37, MaxCyclomatic=7, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=11
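The feed-forward block above is wrapped in `apply_chunking_to_forward`, which (when `chunk_size_feed_forward > 0`) splits the sequence dimension into chunks, applies the feed-forward to each chunk, and concatenates the results, trading extra passes for lower peak memory. A self-contained sketch of the idea (a simplified re-implementation for illustration, not the transformers utility itself):

```python
import torch


def chunked_forward(fn, chunk_size: int, seq_dim: int, x: torch.Tensor) -> torch.Tensor:
    """Apply `fn` chunk-by-chunk along `seq_dim`; equivalent to fn(x) for position-wise fn."""
    if chunk_size == 0:
        return fn(x)
    chunks = x.split(chunk_size, dim=seq_dim)
    return torch.cat([fn(c) for c in chunks], dim=seq_dim)


# The two paths agree for any position-wise fn (here a toy MLP).
mlp = torch.nn.Sequential(torch.nn.Linear(16, 64), torch.nn.GELU(), torch.nn.Linear(64, 16))
x = torch.randn(2, 10, 16)
assert torch.allclose(chunked_forward(mlp, 4, 1, x), mlp(x), atol=1e-6)
```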
2,313
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
|
transformers.models.electra.modeling_electra.ElectraModel
|
import torch
from ...cache_utils import Cache, EncoderDecoderCache
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...masking_utils import create_causal_mask
from typing import Callable, Optional, Union
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...processing_utils import Unpack
@auto_docstring
class ElectraModel(ElectraPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = ElectraEmbeddings(config)
if config.embedding_size != config.hidden_size:
self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)
self.encoder = ElectraEncoder(config)
self.config = config
self.gradient_checkpointing = False
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[list[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithCrossAttentions]:
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
return_legacy_cache = False
if use_cache and (not isinstance(past_key_values, Cache)):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
return_legacy_cache = True
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if input_ids is not None:
device = input_ids.device
input_shape = input_ids.shape
else:
device = inputs_embeds.device
input_shape = inputs_embeds.shape[:-1]
seq_length = input_shape[1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
if hasattr(self, 'embeddings_project'):
embedding_output = self.embeddings_project(embedding_output)
attention_mask, encoder_attention_mask = self._create_attention_masks(input_shape=input_shape, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, embedding_output=embedding_output, encoder_hidden_states=encoder_hidden_states, cache_position=cache_position, past_key_values=past_key_values)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(embedding_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, position_ids=position_ids, **kwargs)
if return_legacy_cache:
encoder_outputs.past_key_values = encoder_outputs.past_key_values.to_legacy_cache()
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=encoder_outputs.last_hidden_state, past_key_values=encoder_outputs.past_key_values)
def _create_attention_masks(self, input_shape, attention_mask, encoder_attention_mask, embedding_output, encoder_hidden_states, cache_position, past_key_values):
if attention_mask is not None and attention_mask.dim() == 2:
if self.config.is_decoder:
attention_mask = create_causal_mask(config=self.config, input_embeds=embedding_output, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values)
else:
attention_mask = self._update_full_mask(attention_mask, embedding_output)
elif attention_mask is not None and attention_mask.dim() == 3:
if 'flash' in self.config._attn_implementation or self.config._attn_implementation == 'flex_attention':
raise ValueError(f'Passing attention mask with a 3D/4D shape does not work with type {self.config._attn_implementation} - please use either `sdpa` or `eager` instead.')
attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
if encoder_attention_mask is not None:
if encoder_attention_mask.dim() == 2:
encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, embedding_output.shape[:2], embedding_output)
else:
if 'flash' in self.config._attn_implementation or self.config._attn_implementation == 'flex_attention':
raise ValueError(f'Passing attention mask with a 3D/4D shape does not work with type {self.config._attn_implementation} - please use either `sdpa` or `eager` instead.')
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
return (attention_mask, encoder_attention_mask)
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
if encoder_hidden_states is not None and encoder_attention_mask is not None:
if 'flash' in self.config._attn_implementation:
encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
elif self.config._attn_implementation == 'sdpa':
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
elif self.config._attn_implementation == 'flex_attention':
if isinstance(encoder_attention_mask, torch.Tensor):
encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False)
else:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
return encoder_attention_mask
|
@auto_docstring
class ElectraModel(ElectraPreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel
'''
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[list[torch.FloatTensor]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithCrossAttentions]:
pass
def _create_attention_masks(self, input_shape, attention_mask, encoder_attention_mask, embedding_output, encoder_hidden_states, cache_position, past_key_values):
pass
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
pass
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
pass
| total_program_units: 12
| total_doc_str: 1
| AvgCountLine: 22
| AvgCountLineBlank: 3
| AvgCountLineCode: 18
| AvgCountLineComment: 2
| AvgCyclomatic: 4
| CommentToCodeRatio: 0.08
| CountClassBase: 1
| CountClassCoupled: 7
| CountClassCoupledModified: 3
| CountClassDerived: 0
| CountDeclInstanceMethod: 5
| CountDeclInstanceVariable: 4
| CountDeclMethod: 5
| CountDeclMethodAll: 6
| CountLine: 121
| CountLineBlank: 17
| CountLineCode: 96
| CountLineCodeDecl: 38
| CountLineCodeExe: 69
| CountLineComment: 8
| CountStmt: 51
| CountStmtDecl: 22
| CountStmtExe: 45
| MaxCyclomatic: 15
| MaxInheritanceTree: 2
| MaxNesting: 2
| SumCyclomatic: 21
|
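The `_update_full_mask` and `_update_cross_attn_mask` helpers in the record above delegate to `_prepare_4d_attention_mask(_for_sdpa)`. As a rough, self-contained sketch of what that expansion does under the usual `(batch, src_len)` 0/1 padding-mask convention (an illustration, not the library helper itself):

```python
from typing import Optional

import torch

def expand_padding_mask(mask: torch.Tensor, dtype: torch.dtype,
                        tgt_len: Optional[int] = None) -> torch.Tensor:
    # (batch, src_len) 0/1 mask -> additive (batch, 1, tgt_len, src_len) mask:
    # 0.0 where attention is allowed, a large negative value where it is masked.
    batch, src_len = mask.shape
    tgt_len = tgt_len if tgt_len is not None else src_len
    expanded = mask[:, None, None, :].expand(batch, 1, tgt_len, src_len).to(dtype)
    return (1.0 - expanded) * torch.finfo(dtype).min

padding_mask = torch.tensor([[1, 1, 1, 0]])  # last position is padding
print(expand_padding_mask(padding_mask, torch.float32).shape)  # torch.Size([1, 1, 4, 4])
```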
2,314
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
|
transformers.models.electra.modeling_electra.ElectraOutput
|
from torch import nn
import torch
class ElectraOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class ElectraOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3
| total_doc_str: 0
| AvgCountLine: 5
| AvgCountLineBlank: 0
| AvgCountLineCode: 5
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 0
| CountClassBase: 1
| CountClassCoupled: 2
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 3
| CountDeclMethod: 2
| CountDeclMethodAll: 12
| CountLine: 12
| CountLineBlank: 1
| CountLineCode: 11
| CountLineCodeDecl: 6
| CountLineCodeExe: 8
| CountLineComment: 0
| CountStmt: 11
| CountStmtDecl: 6
| CountStmtExe: 8
| MaxCyclomatic: 1
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 2
|
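A minimal usage sketch of the block above, assuming `ElectraOutput` as defined in this record and a toy config (all sizes are made up for illustration):

```python
import torch
from types import SimpleNamespace

config = SimpleNamespace(intermediate_size=16, hidden_size=8,
                         layer_norm_eps=1e-12, hidden_dropout_prob=0.1)
block = ElectraOutput(config)
block.eval()  # disable dropout for a deterministic check

ffn_activations = torch.randn(2, 5, config.intermediate_size)  # FFN output
layer_input = torch.randn(2, 5, config.hidden_size)            # residual branch
out = block(ffn_activations, layer_input)  # project, dropout, add residual, LayerNorm
print(out.shape)                           # torch.Size([2, 5, 8])
```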
2,315
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
|
transformers.models.electra.modeling_electra.ElectraPreTrainedModel
|
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_electra import ElectraConfig
from torch import nn
@auto_docstring
class ElectraPreTrainedModel(PreTrainedModel):
config_class = ElectraConfig
base_model_prefix = 'electra'
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': ElectraLayer, 'attentions': ElectraSelfAttention, 'cross_attentions': ElectraCrossAttention}
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class ElectraPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| total_program_units: 3
| total_doc_str: 1
| AvgCountLine: 15
| AvgCountLineBlank: 0
| AvgCountLineCode: 12
| AvgCountLineComment: 3
| AvgCyclomatic: 6
| CommentToCodeRatio: 0.47
| CountClassBase: 1
| CountClassCoupled: 0
| CountClassCoupledModified: 0
| CountClassDerived: 8
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 0
| CountDeclMethod: 1
| CountDeclMethodAll: 1
| CountLine: 27
| CountLineBlank: 2
| CountLineCode: 17
| CountLineCodeDecl: 6
| CountLineCodeExe: 15
| CountLineComment: 8
| CountStmt: 15
| CountStmtDecl: 6
| CountStmtExe: 13
| MaxCyclomatic: 6
| MaxInheritanceTree: 1
| MaxNesting: 2
| SumCyclomatic: 6
|
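`_init_weights` above is invoked once per submodule by the `PreTrainedModel` machinery; the effect is the same as walking a module tree with `nn.Module.apply`. A standalone sketch of that pattern (the `std` default of 0.02 is an assumed illustrative value, standing in for `config.initializer_range`):

```python
import torch
from torch import nn

def init_weights(module: nn.Module, std: float = 0.02) -> None:
    # Mirrors the Linear/LayerNorm branches above for a plain module tree.
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

tiny = nn.Sequential(nn.Linear(4, 4), nn.LayerNorm(4))
tiny.apply(init_weights)   # applied recursively to every submodule
print(tiny[1].weight)      # LayerNorm weights are now all ones
```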
2,316
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
|
transformers.models.electra.modeling_electra.ElectraSelfAttention
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import torch
from ...cache_utils import Cache, EncoderDecoderCache
from torch import nn
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
class ElectraSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size ** (-0.5)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(*hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(*hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*hidden_shape).transpose(1, 2)
if past_key_value is not None:
current_past_key_value = past_key_value
if isinstance(past_key_value, EncoderDecoderCache):
current_past_key_value = past_key_value.self_attention_cache
key_layer, value_layer = current_past_key_value.update(key_layer, value_layer, self.layer_idx, {'cache_position': cache_position})
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
if self.position_embedding_type != 'absolute':
raise ValueError(f'You are using {self.config._attn_implementation} as attention type. However, non-absolute positional embeddings can not work with them. Please load the model with `attn_implementation="eager"`.')
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_layer, key_layer, value_layer, attention_mask, dropout=0.0 if not self.training else self.dropout.p, scaling=self.scaling, head_mask=head_mask, use_cache=past_key_value is not None, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
return (attn_output, attn_weights)
|
class ElectraSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| total_program_units: 3
| total_doc_str: 0
| AvgCountLine: 43
| AvgCountLineBlank: 7
| AvgCountLineCode: 31
| AvgCountLineComment: 6
| AvgCyclomatic: 6
| CommentToCodeRatio: 0.19
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 3
| CountDeclInstanceVariable: 11
| CountDeclMethod: 3
| CountDeclMethodAll: 13
| CountLine: 132
| CountLineBlank: 22
| CountLineCode: 93
| CountLineCodeDecl: 44
| CountLineCodeExe: 80
| CountLineComment: 18
| CountStmt: 72
| CountStmtDecl: 35
| CountStmtExe: 68
| MaxCyclomatic: 13
| MaxInheritanceTree: 1
| MaxNesting: 2
| SumCyclomatic: 17
|
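`eager_attention_forward` is referenced in the record above but not included in it. A rough sketch of the eager path's core computation, `softmax(Q @ K^T * scaling + mask) @ V` (a simplified signature under assumed shapes; the real helper also handles head masking and module state):

```python
import torch
import torch.nn.functional as F

def eager_attention(query, key, value, attn_mask=None, scaling=None, dropout_p=0.0):
    # query/key/value: (batch, num_heads, seq_len, head_dim)
    scaling = scaling if scaling is not None else query.shape[-1] ** -0.5
    scores = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attn_mask is not None:
        scores = scores + attn_mask  # additive mask: 0 or large negative values
    probs = F.dropout(F.softmax(scores, dim=-1), p=dropout_p)
    return torch.matmul(probs, value), probs

q = k = v = torch.randn(1, 2, 4, 8)  # batch=1, heads=2, seq=4, head_dim=8
out, weights = eager_attention(q, k, v)
print(out.shape, weights.shape)      # torch.Size([1, 2, 4, 8]) torch.Size([1, 2, 4, 4])
```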
2,317
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py
|
transformers.models.electra.modeling_electra.ElectraSelfOutput
|
from torch import nn
import torch
class ElectraSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class ElectraSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3
| total_doc_str: 0
| AvgCountLine: 5
| AvgCountLineBlank: 0
| AvgCountLineCode: 5
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 0
| CountClassBase: 1
| CountClassCoupled: 2
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 2
| CountDeclInstanceVariable: 3
| CountDeclMethod: 2
| CountDeclMethodAll: 12
| CountLine: 12
| CountLineBlank: 1
| CountLineCode: 11
| CountLineCodeDecl: 6
| CountLineCodeExe: 8
| CountLineComment: 0
| CountStmt: 11
| CountStmtDecl: 6
| CountStmtExe: 8
| MaxCyclomatic: 1
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 2
|
2,318
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/tokenization_electra.py
|
transformers.models.electra.tokenization_electra.ElectraTokenizer
|
from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
import os
import collections
from typing import Optional
class ElectraTokenizer(PreTrainedTokenizer):
"""
Construct an Electra tokenizer. Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
Whether or not to do basic tokenization before WordPiece.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original Electra).
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
extra spaces.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs):
if not os.path.isfile(vocab_file):
raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = ElectraTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
@property
def do_lower_case(self):
return self.basic_tokenizer.do_lower_case
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text, split_special_tokens=False):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens if not split_special_tokens else None):
if token in self.basic_tokenizer.never_split:
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. An Electra sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is not None:
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
return [1] + [0] * len(token_ids_0) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
else:
vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(vocab_file, 'w', encoding='utf-8') as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
index = token_index
writer.write(token + '\n')
index += 1
return (vocab_file,)
|
class ElectraTokenizer(PreTrainedTokenizer):
'''
Construct an Electra tokenizer. Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
Whether or not to do basic tokenization before WordPiece.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original Electra).
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
extra spaces.
'''
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs):
pass
@property
def do_lower_case(self):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def _tokenize(self, text, split_special_tokens=False):
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) to an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) to a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) into a single string.'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. An Electra sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| total_program_units: 14
| total_doc_str: 6
| AvgCountLine: 15
| AvgCountLineBlank: 1
| AvgCountLineCode: 10
| AvgCountLineComment: 4
| AvgCyclomatic: 2
| CommentToCodeRatio: 0.72
| CountClassBase: 1
| CountClassCoupled: 9
| CountClassCoupledModified: 2
| CountClassDerived: 0
| CountDeclInstanceMethod: 12
| CountDeclInstanceVariable: 5
| CountDeclMethod: 12
| CountDeclMethodAll: 101
| CountLine: 236
| CountLineBlank: 29
| CountLineCode: 121
| CountLineCodeDecl: 53
| CountLineCodeExe: 85
| CountLineComment: 87
| CountStmt: 65
| CountStmtDecl: 29
| CountStmtExe: 52
| MaxCyclomatic: 6
| MaxInheritanceTree: 3
| MaxNesting: 3
| SumCyclomatic: 27
|
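The `[CLS]`/`[SEP]` layouts documented in the record above can be checked directly. A sketch assuming `ElectraTokenizer` from this record and a hypothetical minimal `vocab.txt` (the file contents below are made up):

```python
# Hypothetical minimal WordPiece vocab; any vocab containing the special tokens works.
with open("vocab.txt", "w", encoding="utf-8") as f:
    f.write("\n".join(["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]", "hello", "world"]))

tok = ElectraTokenizer("vocab.txt")
ids_a = tok.convert_tokens_to_ids(["hello"])  # [5]
ids_b = tok.convert_tokens_to_ids(["world"])  # [6]
print(tok.build_inputs_with_special_tokens(ids_a, ids_b))  # [2, 5, 3, 6, 3]
print(tok.get_special_tokens_mask(ids_a, ids_b))           # [1, 0, 1, 0, 1]
```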
2,319
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/tokenization_electra_fast.py
|
transformers.models.electra.tokenization_electra_fast.ElectraTokenizerFast
|
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
import json
from typing import Optional
from .tokenization_electra import ElectraTokenizer
class ElectraTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" ELECTRA tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
clean_text (`bool`, *optional*, defaults to `True`):
Whether or not to clean the text before tokenization by removing any control characters and replacing all
whitespaces by the classic one.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original ELECTRA).
wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
The prefix for subwords.
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = ElectraTokenizer
def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars:
normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
normalizer_state['lowercase'] = do_lower_case
normalizer_state['strip_accents'] = strip_accents
normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. An ELECTRA sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1 is not None:
output += token_ids_1 + [self.sep_token_id]
return output
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
|
class ElectraTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" ELECTRA tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
clean_text (`bool`, *optional*, defaults to `True`):
Whether or not to clean the text before tokenization by removing any control characters and replacing all
whitespaces by the classic one.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original ELECTRA).
wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
The prefix for subwords.
'''
def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
pass
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
'''
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. An ELECTRA sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| total_program_units: 4
| total_doc_str: 2
| AvgCountLine: 24
| AvgCountLineBlank: 3
| AvgCountLineCode: 14
| AvgCountLineComment: 7
| AvgCyclomatic: 2
| CommentToCodeRatio: 1.12
| CountClassBase: 1
| CountClassCoupled: 4
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 4
| CountDeclInstanceVariable: 1
| CountDeclMethod: 4
| CountDeclMethodAll: 92
| CountLine: 141
| CountLineBlank: 18
| CountLineCode: 58
| CountLineCodeDecl: 29
| CountLineCodeExe: 38
| CountLineComment: 65
| CountStmt: 27
| CountStmtDecl: 14
| CountStmtExe: 22
| MaxCyclomatic: 2
| MaxInheritanceTree: 3
| MaxNesting: 1
| SumCyclomatic: 7
|
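A standalone illustration of the normalizer check in the `__init__` above: the backend normalizer is rebuilt only when its serialized state disagrees with the arguments passed in. The state dict below is hypothetical, shaped like what `json.loads` would return:

```python
# Hypothetical serialized normalizer state from the backend tokenizer.
saved_state = {"type": "BertNormalizer", "lowercase": True,
               "strip_accents": None, "handle_chinese_chars": True}
requested = {"lowercase": False,  # user asked for do_lower_case=False
             "strip_accents": None,
             "handle_chinese_chars": True}

needs_rebuild = any(saved_state.get(k, v) != v for k, v in requested.items())
print(needs_rebuild)  # True -- "lowercase" differs, so a new normalizer is constructed
```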
2,320
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/configuration_emu3.py
|
transformers.models.emu3.configuration_emu3.Emu3Config
|
from typing import Any, Optional, Union
from ...configuration_utils import PretrainedConfig
class Emu3Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Emu3Model`]. It is used to instantiate an
Emu3 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the
[Emu3-community/Emu3-Chat-hf](https://huggingface.co/Emu3-community/Emu3-Chat-hf).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vq_config (`Union[Dict, Emu3VQVAEConfig]`, *optional*):
Emu3VQVAEConfig instance containing the configuration for the VQ-VAE model.
text_config (`Union[Dict, Emu3TextConfig]`, *optional*):
Emu3TextConfig instance containing the configuration for the language model.
vocabulary_map (`dict`, *optional*):
A dictionary containing the vocabulary map from the tokenizer. Used to obtain tokens from the image inputs.
"""
model_type = 'emu3'
keys_to_ignore_at_inference = ['past_key_values']
sub_configs = {'text_config': Emu3TextConfig, 'vq_config': Emu3VQVAEConfig}
def __init__(self, vq_config: Union[dict, Emu3VQVAEConfig]=None, text_config: Union[dict, Emu3TextConfig]=None, vocabulary_map: Optional[dict[int, int]]=None, **kwargs):
if vq_config is None:
vq_config = Emu3VQVAEConfig()
elif isinstance(vq_config, dict):
vq_config = Emu3VQVAEConfig(**vq_config)
if text_config is None:
text_config = Emu3TextConfig()
elif isinstance(text_config, dict):
text_config = Emu3TextConfig(**text_config)
self.vq_config = vq_config
self.text_config = text_config
self.vocabulary_map = vocabulary_map
self.image_token_id = vocabulary_map.get('<image>') if vocabulary_map is not None else None
super().__init__(**kwargs)
|
class Emu3Config(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Emu3Model`]. It is used to instantiate an
Emu3 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the
[Emu3-community/Emu3-Chat-hf](https://huggingface.co/Emu3-community/Emu3-Chat-hf).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vq_config (`Union[Dict, Emu3VQVAEConfig]`, *optional*):
Emu3VQVAEConfig instance containing the configuration for the VQ-VAE model.
text_config (`Union[Dict, Emu3TextConfig]`, *optional*):
Emu3TextConfig instance containing the configuration for the language model.
vocabulary_map (`dict`, *optional*):
A dictionary containing the vocabulary map from the tokenizer. Used to obtain tokens from the image inputs.
'''
def __init__(self, vq_config: Union[dict, Emu3VQVAEConfig]=None, text_config: Union[dict, Emu3TextConfig]=None, vocabulary_map: Optional[dict[int, int]]=None, **kwargs):
pass
| total_program_units: 2
| total_doc_str: 1
| AvgCountLine: 22
| AvgCountLineBlank: 3
| AvgCountLineCode: 19
| AvgCountLineComment: 0
| AvgCyclomatic: 5
| CommentToCodeRatio: 0.65
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 2
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 3
| CountDeclMethod: 1
| CountDeclMethodAll: 1
| CountLine: 46
| CountLineBlank: 8
| CountLineCode: 23
| CountLineCodeDecl: 14
| CountLineCodeExe: 15
| CountLineComment: 15
| CountStmt: 15
| CountStmtDecl: 8
| CountStmtExe: 13
| MaxCyclomatic: 5
| MaxInheritanceTree: 1
| MaxNesting: 1
| SumCyclomatic: 5
|
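Unlike the text and VQ sub-configs, this class's docstring carries no usage snippet. A short sketch of composing the composite config from plain dicts, as the `__init__` above supports (the `<image>` token id below is a made-up example, not a real checkpoint value):

```python
from transformers import Emu3Config

config = Emu3Config(
    vq_config={"codebook_size": 32768, "embed_dim": 4},          # becomes Emu3VQVAEConfig
    text_config={"hidden_size": 4096, "num_hidden_layers": 32},  # becomes Emu3TextConfig
    vocabulary_map={"<image>": 151851},                          # hypothetical token id
)
print(type(config.vq_config).__name__)  # Emu3VQVAEConfig
print(config.image_token_id)            # 151851, looked up from the vocabulary map
```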
2,321
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/configuration_emu3.py
|
transformers.models.emu3.configuration_emu3.Emu3TextConfig
|
from typing import Any, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation
class Emu3TextConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Emu3TextModel`]. It is used to instantiate an
Emu3 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the
[Emu3-community/Emu3-Chat-hf](https://huggingface.co/Emu3-community/Emu3-Chat-hf).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 184622):
Vocabulary size of the Emu3 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Emu3Model`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 9216):
The maximum sequence length that this model might ever be used with. Emu3 supports up to 9216 tokens.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 151643):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 151849):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 151850):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 1000000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
and expect the model to work with a longer `max_position_embeddings`, we recommend updating this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
```python
>>> from transformers import Emu3Model, Emu3Config
>>> # Initializing a Emu3-community/Emu3-Chat-hf style configuration
>>> configuration = Emu3Config()
>>> # Initializing a model from the Emu3-community/Emu3-Chat-hf style configuration
>>> model = Emu3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'emu3_text_model'
base_config_key = 'text_config'
keys_to_ignore_at_inference = ['past_key_values']
def __init__(self, vocab_size: int=184622, hidden_size: int=4096, intermediate_size: int=14336, num_hidden_layers: int=32, num_attention_heads: int=32, num_key_value_heads: Optional[int]=8, hidden_act: str='silu', max_position_embeddings: int=9216, rms_norm_eps: float=1e-05, use_cache: bool=True, pad_token_id: int=151643, bos_token_id: int=151849, eos_token_id: int=151850, tie_word_embeddings: bool=False, rope_theta: float=1000000.0, rope_scaling: Optional[dict[str, Any]]=None, mlp_bias=False, attention_bias=False, attention_dropout: float=0.1, initializer_range: float=0.02, **kwargs):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.mlp_bias = mlp_bias
self.attention_bias = attention_bias
self.initializer_range = initializer_range
rope_config_validation(self)
self.attention_dropout = attention_dropout
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
|
class Emu3TextConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Emu3TextModel`]. It is used to instantiate an
Emu3 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the
[Emu3-community/Emu3-Chat-hf](https://huggingface.co/Emu3-community/Emu3-Chat-hf).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 184622):
Vocabulary size of the Emu3 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Emu3Model`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 9216):
The maximum sequence length that this model might ever be used with. Emu3 supports up to 9216 tokens.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 151643):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 151849):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 151850):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 1000000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
and expect the model to work with a longer `max_position_embeddings`, we recommend updating this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
```python
>>> from transformers import Emu3Model, Emu3Config
>>> # Initializing a Emu3-community/Emu3-Chat-hf style configuration
>>> configuration = Emu3Config()
>>> # Initializing a model from the Emu3-community/Emu3-Chat-hf style configuration
>>> model = Emu3Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size: int=184622, hidden_size: int=4096, intermediate_size: int=14336, num_hidden_layers: int=32, num_attention_heads: int=32, num_key_value_heads: Optional[int]=8, hidden_act: str='silu', max_position_embeddings: int=9216, rms_norm_eps: float=1e-05, use_cache: bool=True, pad_token_id: int=151643, bos_token_id: int=151849, eos_token_id: int=151850, tie_word_embeddings: bool=False, rope_theta: float=1000000.0, rope_scaling: Optional[dict[str, Any]]=None, mlp_bias=False, attention_bias=False, attention_dropout: float=0.1, initializer_range: float=0.02, **kwargs):
pass
| total_program_units: 2
| total_doc_str: 1
| AvgCountLine: 50
| AvgCountLineBlank: 2
| AvgCountLineCode: 48
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 1.92
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 16
| CountDeclMethod: 1
| CountDeclMethodAll: 1
| CountLine: 164
| CountLineBlank: 12
| CountLineCode: 52
| CountLineCodeDecl: 44
| CountLineCodeExe: 27
| CountLineComment: 100
| CountStmt: 23
| CountStmtDecl: 21
| CountStmtExe: 21
| MaxCyclomatic: 1
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 1
|
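The `rope_scaling` contract documented above is easiest to see with a concrete dict. A sketch using the 'linear' variant, assuming `Emu3TextConfig` is importable from `transformers` like the composite config; the factor and extended context length are arbitrary example values:

```python
from transformers import Emu3TextConfig

config = Emu3TextConfig(
    max_position_embeddings=18432,                        # 2x the 9216 default
    rope_scaling={"rope_type": "linear", "factor": 2.0},  # checked by rope_config_validation
)
print(config.rope_scaling)  # {'rope_type': 'linear', 'factor': 2.0}
```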
2,322
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/configuration_emu3.py
|
transformers.models.emu3.configuration_emu3.Emu3VQVAEConfig
|
from ...configuration_utils import PretrainedConfig
class Emu3VQVAEConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`Emu3VQVAE`]. It is used to instantiate a VQ-VAE
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a configuration similar to that of the VQ model presented in the Emu3 paper.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
codebook_size (`int`, *optional*, defaults to 32768):
Codebook size of the VQ model.
embed_dim (`int`, *optional*, defaults to 4):
Dimension of the quantized vector in codebook.
latent_channels (`int`, *optional*, defaults to 4):
Dimension of the output channel of the encoder and the input channel of the decoder.
double_latent (`bool`, *optional*, defaults to `False`):
Whether to double the output dim of the encoder.
in_channels (`int`, *optional*, defaults to 3):
Input channel of encoder.
out_channels (`int`, *optional*, defaults to 3):
Output channel of decoder.
temporal_downsample_factor (`int`, *optional*, defaults to 4):
Temporal downsample factor.
base_channels (`int`, *optional*, defaults to 256):
Basic channel number of the intermediate blocks.
channel_multiplier (`list[int]`, *optional*, defaults to `[1, 2, 2, 4]`):
Channel scaling factor of the intermediate blocks.
num_res_blocks (`int`, *optional*, defaults to 2):
Residual block number in each stage.
attn_resolutions (`list[int]`, *optional*, defaults to `[3]`):
Stage indices to apply attention.
hidden_size (`int`, *optional*, defaults to 1024):
Dimension of the hidden representations in the attention layer.
num_attention_heads (`int`, *optional*, defaults to 1):
Number of attention heads for each attention layer.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
```python
>>> from transformers import Emu3VQVAE, Emu3VQVAEConfig
>>> # Initializing a video VQ model of Emu3 configuration
>>> configuration = Emu3VQVAEConfig()
>>> # Initializing a model from the Emu3 VQ model style configuration
>>> model = Emu3VQVAE(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'emu3_vqgan'
base_config_key = 'vq_config'
def __init__(self, codebook_size: int=32768, embed_dim: int=4, latent_channels: int=4, double_latent: bool=False, in_channels: int=3, out_channels: int=3, temporal_downsample_factor: int=4, base_channels: int=256, channel_multiplier: list[int]=[1, 2, 2, 4], num_res_blocks: int=2, attn_resolutions: list[int]=[3], hidden_size: int=1024, num_attention_heads: int=1, attention_dropout: float=0.0, **kwargs):
super().__init__(**kwargs)
self.codebook_size = codebook_size
self.embed_dim = embed_dim
self.latent_channels = latent_channels
self.double_latent = double_latent
self.in_channels = in_channels
self.out_channels = out_channels
self.temporal_downsample_factor = temporal_downsample_factor
self.base_channels = base_channels
self.channel_multiplier = channel_multiplier
self.num_res_blocks = num_res_blocks
self.attn_resolutions = attn_resolutions
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.attention_dropout = attention_dropout
|
class Emu3VQVAEConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`Emu3VQVAE`]. It is used to instantiate a VQ-VAE
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a configuration similar to that of the VQ model presented in the Emu3 paper.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
codebook_size (`int`, *optional*, defaults to 32768):
Codebook size of the VQ model.
embed_dim (`int`, *optional*, defaults to 4):
Dimension of the quantized vector in codebook.
latent_channels (`int`, *optional*, defaults to 4):
Dimension of the output channel of the encoder and the input channel of the decoder.
double_latent (`bool`, *optional*, defaults to `False`):
Whether to double the output dim of the encoder.
in_channels (`int`, *optional*, defaults to 3):
Input channel of encoder.
out_channels (`int`, *optional*, defaults to 3):
Output channel of decoder.
temporal_downsample_factor (`int`, *optional*, defaults to 4):
Temporal downsample factor.
base_channels (`int`, *optional*, defaults to 256):
Basic channel number of the intermediate blocks.
channel_multiplier (`list[int]`, *optional*, defaults to `[1, 2, 2, 4]`):
Channel scaling factor of the intermediate blocks.
num_res_blocks (`int`, *optional*, defaults to 2):
Residual block number in each stage.
attn_resolutions (`list[int]`, *optional*, defaults to `[3]`):
Stage indices to apply attention.
hidden_size (`int`, *optional*, defaults to 1024):
Dimension of the hidden representations in the attention layer.
num_attention_heads (`int`, *optional*, defaults to 1):
Number of attention heads for each attention layer.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
```python
>>> from transformers import Emu3VQVAE, Emu3VQVAEConfig
>>> # Initializing a video VQ model of Emu3 configuration
>>> configuration = Emu3VQVAEConfig()
>>> # Initializing a model from the Emu3 VQ model style configuration
>>> model = Emu3VQVAE(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, codebook_size: int=32768, embed_dim: int=4, latent_channels: int=4, double_latent: bool=False, in_channels: int=3, out_channels: int=3, temporal_downsample_factor: int=4, base_channels: int=256, channel_multiplier: list[int]=[1, 2, 2, 4], num_res_blocks: int=2, attn_resolutions: list[int]=[3], hidden_size: int=1024, num_attention_heads: int=1, attention_dropout: float=0.0, **kwargs):
pass
| total_program_units: 2
| total_doc_str: 1
| AvgCountLine: 34
| AvgCountLineBlank: 1
| AvgCountLineCode: 33
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 1.22
| CountClassBase: 1
| CountClassCoupled: 4
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 14
| CountDeclMethod: 1
| CountDeclMethodAll: 1
| CountLine: 88
| CountLineBlank: 8
| CountLineCode: 36
| CountLineCodeDecl: 35
| CountLineCodeExe: 17
| CountLineComment: 44
| CountStmt: 19
| CountStmtDecl: 18
| CountStmtExe: 17
| MaxCyclomatic: 1
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 1
|
2,323
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/image_processing_emu3.py
|
transformers.models.emu3.image_processing_emu3.Emu3ImageProcessor
|
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, make_nested_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from typing import Optional, Union
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, pad, resize, to_channel_dimension_format
from collections.abc import Iterable
import numpy as np
from ...utils import TensorType, is_vision_available, logging
class Emu3ImageProcessor(BaseImageProcessor):
"""
Constructs an Emu3 image processor that dynamically resizes images based on the original images.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
min_pixels (`int`, *optional*, defaults to `512 * 512`):
The minimum number of pixels the image may be resized to.
max_pixels (`int`, *optional*, defaults to `1024 * 1024`):
The maximum number of pixels the image may be resized to.
spatial_factor (`int`, *optional*, defaults to 8):
The spatial downsample factor applied to the image during the feature extraction phase.
"""
model_input_names = ['pixel_values', 'image_sizes']
def __init__(self, do_resize: bool=True, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, do_pad: bool=True, min_pixels: int=512 * 512, max_pixels: int=1024 * 1024, spatial_factor: int=8, **kwargs) -> None:
super().__init__(**kwargs)
self.do_resize = do_resize
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.min_pixels = min_pixels
self.max_pixels = max_pixels
self.spatial_factor = spatial_factor
self.size = {'min_pixels': min_pixels, 'max_pixels': max_pixels}
self.do_convert_rgb = do_convert_rgb
self.do_pad = do_pad  # stored so preprocess() can fall back to it when its do_pad argument is None
def _preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
vision_info (`list[Dict]`, *optional*):
Optional list of dictionaries containing additional information about vision inputs.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
images = make_flat_list_of_images(images)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
images = [to_numpy_array(image) for image in images]
if is_scaled_image(images[0]) and do_rescale:
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
height, width = get_image_size(images[0], channel_dim=input_data_format)
resized_height, resized_width = (height, width)
processed_images = []
for image in images:
if do_resize:
resized_height, resized_width = smart_resize(height, width, factor=self.spatial_factor, min_pixels=self.min_pixels, max_pixels=self.max_pixels)
image = resize(image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
processed_images.append(image)
images = np.array(processed_images)
return images
def _pad_for_batching(self, pixel_values: list[np.ndarray], image_sizes: list[list[int]], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Pads images on the `num_of_patches` dimension with zeros to form a batch with the same number of patches.
Args:
pixel_values (`list[np.ndarray]`):
An array of pixel values for each image, of shape (`batch_size`, `num_patches`, `image_in_3D`)
image_sizes (`list[list[int]]`):
A list of sizes for each image in `pixel_values` in (height, width) format.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
list[`np.ndarray`]: The padded images.
"""
max_shape = (max([size[0] for size in image_sizes]), max([size[1] for size in image_sizes]))
pixel_values = [pad(image, padding=((0, max_shape[0] - size[0]), (0, max_shape[1] - size[1])), data_format=data_format, input_data_format=input_data_format) for image, size in zip(pixel_values, image_sizes)]
return pixel_values
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, do_pad: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size constraints used when resizing; for this processor the dict holds `min_pixels` and `max_pixels`
(see `__init__`) rather than a `shortest_edge` key.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
do_pad = do_pad if do_pad is not None else self.do_pad
if images is not None:
images = self.fetch_images(images)
images = make_nested_list_of_images(images)
if images is not None and (not valid_images(images)):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
pixel_values = []
for image in images:
if image:
image = self._preprocess(image, do_resize=do_resize, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format)
pixel_values.extend(image)
image_sizes = [image.shape[-2:] for image in pixel_values]
if do_pad:
pixel_values = self._pad_for_batching(pixel_values, image_sizes)
pixel_values = np.array(pixel_values)
return BatchFeature(data={'pixel_values': pixel_values, 'image_sizes': image_sizes}, tensor_type=return_tensors)
def postprocess(self, images: ImageInput, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Union[str, TensorType]='PIL.Image.Image', input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Postprocess an image or batch of images tensor. Postprocessing is the reverse of preprocessing, and the
parameters should be the same as in `preprocess`.
Args:
images (`ImageInput`):
Image to postprocess. Expects a single or batch of images with pixel values ranging from -1 to 1.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = 1.0 / self.rescale_factor if rescale_factor is None else rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if isinstance(images[0], Image.Image):
return images if len(images) > 1 else images[0]
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
pixel_values = []
for image in images:
image = to_numpy_array(image)
if do_normalize:
image = self.unnormalize(image=image, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
image = image.clip(0, 255).astype(np.uint8)
if do_normalize and do_rescale and (return_tensors == 'PIL.Image.Image'):
image = to_channel_dimension_format(image, ChannelDimension.LAST, input_channel_dim=input_data_format)
pixel_values.append(Image.fromarray(image))
else:
pixel_values.extend(image)
data = {'pixel_values': pixel_values}
return_tensors = return_tensors if return_tensors != 'PIL.Image.Image' else None
return BatchFeature(data=data, tensor_type=return_tensors)
def unnormalize(self, image: np.ndarray, image_mean: Union[float, Iterable[float]], image_std: Union[float, Iterable[float]], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:
"""
Unnormalizes `image` using the mean and standard deviation specified by `mean` and `std`.
image = (image * image_std) + image_mean
Args:
image (`torch.Tensor` of shape `(batch_size, num_channels, image_size, image_size)` or `(num_channels, image_size, image_size)`):
Batch of pixel values to postprocess.
image_mean (`float` or `Iterable[float]`):
The mean to use for unnormalization.
image_std (`float` or `Iterable[float]`):
The standard deviation to use for unnormalization.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
num_channels = 3
if isinstance(image_mean, Iterable):
if len(image_mean) != num_channels:
raise ValueError(f'mean must have {num_channels} elements if it is an iterable, got {len(image_mean)}')
else:
image_mean = [image_mean] * num_channels
if isinstance(image_std, Iterable):
if len(image_std) != num_channels:
raise ValueError(f'std must have {num_channels} elements if it is an iterable, got {len(image_std)}')
else:
image_std = [image_std] * num_channels
rev_image_mean = tuple((-mean / std for mean, std in zip(image_mean, image_std)))
rev_image_std = tuple((1 / std for std in image_std))
image = self.normalize(image=image, mean=rev_image_mean, std=rev_image_std, input_data_format=input_data_format)
return image
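As a usage note: `preprocess()` also depends on module-level helpers not shown in this listing (`smart_resize`, `fetch_images`), so the sketch below exercises `_pad_for_batching`, whose behavior is fully determined by the code above; the array shapes are illustrative:
```python
# Sketch: padding two channels-first images to a common (height, width) with zeros.
import numpy as np

processor = Emu3ImageProcessor()
a = np.zeros((3, 4, 6))  # (num_channels, height, width)
b = np.zeros((3, 8, 6))

padded = processor._pad_for_batching([a, b], image_sizes=[[4, 6], [8, 6]])
print([p.shape for p in padded])  # both (3, 8, 6): padded on the bottom/right
```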
|
class Emu3ImageProcessor(BaseImageProcessor):
'''
Constructs an Emu3 image processor that dynamically resizes images based on the original images.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
min_pixels (`int`, *optional*, defaults to `512 * 512`):
The minimum number of pixels the image may be resized to.
max_pixels (`int`, *optional*, defaults to `1024 * 1024`):
The maximum number of pixels the image may be resized to.
spatial_factor (`int`, *optional*, defaults to 8):
The spatial downsample factor applied to the image during the feature extraction phase.
'''
def __init__(self, do_resize: bool=True, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, do_pad: bool=True, min_pixels: int=512 * 512, max_pixels: int=1024 * 1024, spatial_factor: int=8, **kwargs) -> None:
pass
def _preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
vision_info (`list[Dict]`, *optional*):
Optional list of dictionaries containing additional information about vision inputs.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def _pad_for_batching(self, pixel_values: list[np.ndarray], image_sizes: list[list[int]], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Pads images on the `num_of_patches` dimension with zeros to form a batch with the same number of patches.
Args:
pixel_values (`list[np.ndarray]`):
An array of pixel values for each image, of shape (`batch_size`, `num_patches`, `image_in_3D`)
image_sizes (`list[list[int]]`):
A list of sizes for each image in `pixel_values` in (height, width) format.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
list[`np.ndarray`]: The padded images.
'''
pass
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, do_pad: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size constraints used when resizing; for this processor the dict holds `min_pixels` and `max_pixels`
(see `__init__`) rather than a `shortest_edge` key.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def postprocess(self, images: ImageInput, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Union[str, TensorType]='PIL.Image.Image', input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Postprocess an image or batch of images tensor. Postprocessing is the reverse of preprocessing, and the
parameters should be the same as in `preprocess`.
Args:
images (`ImageInput`):
Image to postprocess. Expects a single or batch of images with pixel values ranging from -1 to 1.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def unnormalize(self, image: np.ndarray, image_mean: Union[float, Iterable[float]], image_std: Union[float, Iterable[float]], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:
'''
Unnormalizes `image` using the mean and standard deviation specified by `mean` and `std`.
image = (image * image_std) + image_mean
Args:
image (`torch.Tensor` of shape `(batch_size, num_channels, image_size, image_size)` or `(num_channels, image_size, image_size)`):
Batch of pixel values to postprocess.
image_mean (`float` or `Iterable[float]`):
The mean to use for unnormalization.
image_std (`float` or `Iterable[float]`):
The standard deviation to use for unnormalization.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| 7
| 6
| 68
| 5
| 38
| 25
| 8
| 0.77
| 1
| 10
| 2
| 0
| 6
| 12
| 6
| 26
| 447
| 37
| 231
| 99
| 158
| 179
| 109
| 33
| 102
| 15
| 3
| 2
| 46
|
2,324
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3Attention
|
import torch.nn.functional as F
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils.deprecation import deprecate_kwarg
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
import torch.nn as nn
import torch
from ...processing_utils import Unpack
from ...cache_utils import Cache, DynamicCache
from typing import Callable, Optional, Union
class Emu3Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Emu3Config, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, 'head_dim', config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim ** (-0.5)
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {'sin': sin, 'cos': cos, 'cache_position': cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return (attn_output, attn_weights)
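To make the reshaping above concrete, here is a standalone sketch (toy sizes, assumed for illustration) of the `(batch, seq, hidden) -> (batch, heads, seq, head_dim)` flow and the `head_dim ** -0.5` scaling:
```python
# Sketch: the projection/reshape pattern used in forward(), with toy dimensions.
import torch

batch, seq, hidden, num_heads = 2, 5, 64, 4
head_dim = hidden // num_heads        # 16, matching config.hidden_size // config.num_attention_heads
scaling = head_dim ** -0.5            # same scaling as self.scaling

x = torch.randn(batch, seq, hidden)
q_proj = torch.nn.Linear(hidden, num_heads * head_dim)

hidden_shape = (batch, seq, -1, head_dim)
q = q_proj(x).view(hidden_shape).transpose(1, 2)  # (batch, heads, seq, head_dim)

scores = (q @ q.transpose(-1, -2)) * scaling      # (batch, heads, seq, seq)
print(q.shape, scores.shape)
```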
|
class Emu3Attention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: Emu3Config, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 4
| 1
| 35
| 4
| 31
| 1
| 3
| 0.03
| 1
| 6
| 3
| 0
| 2
| 11
| 2
| 12
| 74
| 9
| 63
| 31
| 52
| 2
| 34
| 23
| 31
| 5
| 1
| 2
| 6
|
2,325
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3DecoderLayer
|
import torch.nn.functional as F
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
import torch.nn as nn
from ...processing_utils import Unpack
import torch
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
from typing import Callable, Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
class Emu3DecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Emu3Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Emu3Attention(config=config, layer_idx=layer_idx)
self.mlp = Emu3MLP(config)
self.input_layernorm = Emu3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Emu3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.dropout = nn.Dropout(config.attention_dropout)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + self.dropout(hidden_states)
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.dropout(hidden_states)
return hidden_states
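The layer is the usual pre-norm residual design: normalize, transform, add back. A minimal sketch with identity stand-ins (hypothetical placeholders, not the real submodules) shows the order of operations:
```python
# Sketch: the pre-norm residual structure of the decoder layer, with identity stand-ins.
import torch
import torch.nn as nn

hidden = 8
norm1, norm2 = nn.LayerNorm(hidden), nn.LayerNorm(hidden)
attn = nn.Identity()  # stands in for self_attn
mlp = nn.Identity()   # stands in for Emu3MLP
drop = nn.Dropout(0.0)

x = torch.randn(2, 3, hidden)
x = x + drop(attn(norm1(x)))  # first residual branch: attention
x = x + drop(mlp(norm2(x)))   # second residual branch: MLP
print(x.shape)
```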
|
class Emu3DecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Emu3Config, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
| 4
| 0
| 36
| 4
| 21
| 11
| 2
| 0.49
| 1
| 9
| 5
| 0
| 2
| 6
| 2
| 12
| 73
| 9
| 43
| 23
| 29
| 21
| 22
| 12
| 19
| 2
| 1
| 1
| 3
|
2,326
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3ForCausalLM
|
import torch.nn.functional as F
import torch.nn as nn
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...generation import GenerationMixin
import torch
from ...processing_utils import Unpack
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...cache_utils import Cache, DynamicCache
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
from typing import Callable, Optional, Union
@auto_docstring
class Emu3ForCausalLM(Emu3PreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
_tp_plan = {'lm_head': 'colwise_rep'}
_pp_plan = {'lm_head': (['hidden_states'], ['logits'])}
config: Emu3TextConfig
def __init__(self, config):
super().__init__(config)
self.model = Emu3TextModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
"""
Example:
```python
>>> from transformers import Emu3Processor, Emu3ForCausalLM
>>> import torch
>>> import requests
>>> from PIL import Image
>>> model = Emu3ForCausalLM.from_pretrained("BAAI/Emu3-Chat-hf", dtype=torch.bfloat16)
>>> processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf")
>>> inputs = processor(text=["Can you write me a poem about winter."], return_tensors="pt").to(model.device)
>>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
>>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```"""
outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
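One subtlety in the forward pass is `logits_to_keep`: because `-0 == 0`, the default of `0` produces `slice(0, None)` and keeps logits for every position, while `1` keeps only the final position. A small sketch:
```python
# Sketch: how the slice(-logits_to_keep, None) trick behaves on a toy tensor.
import torch

hidden_states = torch.randn(1, 6, 4)  # (batch, seq, hidden)

for logits_to_keep in (0, 1, 2):
    slice_indices = slice(-logits_to_keep, None)
    kept = hidden_states[:, slice_indices, :]
    print(logits_to_keep, kept.shape[1])  # 0 -> 6 (all positions), 1 -> 1, 2 -> 2
```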
|
@auto_docstring
class Emu3ForCausalLM(Emu3PreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> CausalLMOutputWithPast:
'''
Example:
```python
>>> from transformers import Emu3Processor, Emu3ForCausalLM
>>> import torch
>>> import requests
>>> from PIL import Image
>>> model = Emu3ForCausalLM.from_pretrained("BAAI/Emu3-Chat-hf", dtype=torch.bfloat16)
>>> processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf")
>>> inputs = processor(text=["Can you write me a poem about winter."], return_tensors="pt").to(model.device)
>>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
>>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```'''
pass
| 6
| 1
| 14
| 2
| 9
| 4
| 2
| 0.37
| 2
| 9
| 4
| 0
| 8
| 3
| 8
| 9
| 124
| 21
| 75
| 37
| 48
| 28
| 37
| 21
| 28
| 8
| 2
| 1
| 15
|
2,327
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3ForConditionalGeneration
|
import torch.nn.functional as F
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
import torch.nn as nn
from ...generation import GenerationMixin
import torch
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...cache_utils import Cache, DynamicCache
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
class Emu3ForConditionalGeneration(Emu3PreTrainedModel, GenerationMixin):
base_model_prefix = ''
_tied_weights_keys = ['lm_head.weight']
_checkpoint_conversion_mapping = {'^text_model.model': 'model.text_model', '^vqmodel': 'model.vqmodel', '^text_model.lm_head': 'lm_head'}
def __init__(self, config):
super().__init__(config)
self.model = Emu3Model(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def set_decoder(self, decoder):
self.model.set_decoder(decoder)
def get_decoder(self):
return self.model.get_decoder()
@property
def text_model(self):
return self.model.text_model
@property
def vqmodel(self):
return self.model.vqmodel
@property
def vocabulary_mapping(self):
return self.model.vocabulary_mapping
def decode_image_tokens(self, **kwargs):
return self.model.decode_image_tokens(**kwargs)
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_sizes: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithPast]:
"""
image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`):
The sizes of the images in the batch, being (height, width) for each image. Image sizes can be obtained using
[`AutoImageProcessor`]. See [`Emu3ImageProcessor.__call__`] for details ([`Emu3Processor`] uses
[`Emu3ImageProcessor`] for processing images).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import Emu3Processor, Emu3ForConditionalGeneration
>>> import torch
>>> import requests
>>> from PIL import Image
>>> model = Emu3ForConditionalGeneration.from_pretrained("BAAI/Emu3-Chat-hf", dtype=torch.bfloat16)
>>> processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf")
>>> conversation = [
... {
... "role": "system",
... "content": [
... {"type": "text", "text": "You are a helpful assistant."},
... ],
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "Please describe the image."},
... ],
... },
... ]
>>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
>>> image = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
>>> inputs = processor(images=[image], text=[prompt], return_tensors="pt").to(model.device, torch.bfloat16)
>>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
>>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```"""
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, pixel_values=None, **kwargs):
model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, cache_position=cache_position, position_ids=position_ids, pixel_values=pixel_values, use_cache=use_cache, **kwargs)
if cache_position[0] != 0:
model_inputs['pixel_values'] = None
return model_inputs
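Note that `prepare_inputs_for_generation` forwards `pixel_values` only on the prefill step: once `cache_position[0]` is non-zero, the image tokens already live in the KV cache. A sketch of that guard in isolation (the helper name is made up for illustration):
```python
# Sketch: the pixel_values guard, isolated from the model.
import torch

def strip_pixel_values(model_inputs, cache_position):
    # Mirrors the override above: keep pixel_values only on the first (prefill) step.
    if cache_position[0] != 0:
        model_inputs["pixel_values"] = None
    return model_inputs

prefill = strip_pixel_values({"pixel_values": torch.zeros(1)}, torch.tensor([0]))
decode = strip_pixel_values({"pixel_values": torch.zeros(1)}, torch.tensor([5]))
print(prefill["pixel_values"] is None, decode["pixel_values"] is None)  # False True
```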
|
class Emu3ForConditionalGeneration(Emu3PreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def get_output_embeddings(self) -> nn.Module:
pass
def set_decoder(self, decoder):
pass
def get_decoder(self):
pass
@property
def text_model(self):
pass
@property
def vqmodel(self):
pass
@property
def vocabulary_mapping(self):
pass
def decode_image_tokens(self, **kwargs):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_sizes: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithPast]:
'''
image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`):
The sizes of the images in the batch, being (height, width) for each image. Image sizes can be obtained using
[`AutoImageProcessor`]. See [`Emu3ImageProcessor.__call__`] for details ([`Emu3Processor`] uses
[`Emu3ImageProcessor`] for processing images).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import Emu3Processor, Emu3ForConditionalGeneration
>>> import torch
>>> import requests
>>> from PIL import Image
>>> model = Emu3ForConditionalGeneration.from_pretrained("BAAI/Emu3-Chat-hf", dtype=torch.bfloat16)
>>> processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf")
>>> conversation = [
... {
... "role": "system",
... "content": [
... {"type": "text", "text": "You are a helpful assistant."},
... ],
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "Please describe the image."},
... ],
... },
... ]
>>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
>>> image = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
>>> inputs = processor(images=[image], text=[prompt], return_tensors="pt").to(model.device, torch.bfloat16)
>>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
>>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```'''
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, pixel_values=None, **kwargs):
pass
| 18
| 1
| 25
| 3
| 12
| 11
| 2
| 0.88
| 2
| 10
| 5
| 0
| 6
| 3
| 6
| 7
| 163
| 23
| 75
| 38
| 49
| 66
| 38
| 20
| 31
| 7
| 2
| 1
| 12
|
2,328
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3ImageVocabularyMapping
|
import torch
from functools import cached_property
import torch.nn as nn
import torch.nn.functional as F
class Emu3ImageVocabularyMapping:
"""
A class for mapping discrete image tokens from VQGAN to BPE tokens.
"""
def __init__(self, vocab_map):
self.vocab_map = vocab_map
self.eol_token_id = vocab_map.get('<|extra_200|>')
self.image_token_id = vocab_map.get('<image>')
@cached_property
def image_tokens(self):
return sorted([val for name, val in self.vocab_map.items() if name.startswith('<|visual token')])
@cached_property
def image_tokens_str(self):
return sorted([name for name, val in self.vocab_map.items() if name.startswith('<|visual token')])
@cached_property
def img2bpe(self):
return {int(token[-8:-2]): self.vocab_map[token] for token in self.image_tokens_str}
@cached_property
def bpe2img(self):
return {v: k for k, v in self.img2bpe.items()}
@cached_property
def bpe2img_mapping_tensor(self):
mapping = torch.zeros(max(self.bpe2img.keys()) + 1, dtype=torch.int)
for k, v in self.bpe2img.items():
mapping[k] = v
return mapping
@cached_property
def img2bpe_mapping_tensor(self):
mapping = torch.zeros(max(self.img2bpe.keys()) + 1, dtype=torch.int)
for k, v in self.img2bpe.items():
mapping[k] = v
return mapping
def convert_img2bpe(self, img_batch: torch.Tensor) -> torch.Tensor:
device = img_batch.device
eol_row = torch.ones((img_batch.shape[0], 1), dtype=torch.int) * self.eol_token_id
img_tokens = self.img2bpe_mapping_tensor[img_batch.to('cpu')]
img_tokens = torch.cat([img_tokens, eol_row], dim=-1)
return img_tokens.to(device)
def convert_bpe2img(self, img_batch: torch.Tensor) -> torch.Tensor:
device = img_batch.device
img_batch = img_batch[..., :-1]
img_tokens = self.bpe2img_mapping_tensor[img_batch.to('cpu')]
return img_tokens.to(device)
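The `img2bpe` map depends on the visual-token naming scheme `<|visual token NNNNNN|>`, from which `token[-8:-2]` slices out the six-digit image-token id. A toy sketch (every vocabulary entry and id below is made up):
```python
# Sketch: a toy vocabulary exercising the mapping; all ids are illustrative.
import torch

vocab_map = {
    "<|extra_200|>": 100,  # end-of-line token
    "<image>": 101,
    "<|visual token 000000|>": 200,
    "<|visual token 000001|>": 201,
    "<|visual token 000002|>": 202,
}
mapping = Emu3ImageVocabularyMapping(vocab_map)

img = torch.tensor([[0, 1, 2]])      # one row of VQGAN codes
bpe = mapping.convert_img2bpe(img)
print(bpe)                           # tensor([[200, 201, 202, 100]]) - EOL appended per row
print(mapping.convert_bpe2img(bpe))  # tensor([[0, 1, 2]]) - EOL column stripped
```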
|
class Emu3ImageVocabularyMapping:
'''
A class for mapping discrete image tokens from VQGAN to BPE tokens.
'''
def __init__(self, vocab_map):
pass
@cached_property
def image_tokens(self):
pass
@cached_property
def image_tokens_str(self):
pass
@cached_property
def img2bpe(self):
pass
@cached_property
def bpe2img(self):
pass
@cached_property
def bpe2img_mapping_tensor(self):
pass
@cached_property
def img2bpe_mapping_tensor(self):
pass
def convert_img2bpe(self, img_batch: torch.Tensor) -> torch.Tensor:
pass
def convert_bpe2img(self, img_batch: torch.Tensor) -> torch.Tensor:
pass
| 16
| 1
| 4
| 0
| 4
| 0
| 1
| 0.1
| 0
| 2
| 0
| 0
| 9
| 3
| 9
| 9
| 52
| 9
| 40
| 30
| 24
| 4
| 34
| 22
| 24
| 2
| 0
| 1
| 11
|
2,329
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3MLP
|
import torch.nn as nn
from ...activations import ACT2FN
class Emu3MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
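This is the gated (SwiGLU-style) MLP: `down_proj(act_fn(gate_proj(x)) * up_proj(x))`. A sketch of the same computation with toy sizes, assuming the common `hidden_act="silu"` setting:
```python
# Sketch: the gated-MLP computation with SiLU, matching the structure above.
import torch
import torch.nn as nn
import torch.nn.functional as F

hidden, intermediate = 8, 32
gate = nn.Linear(hidden, intermediate, bias=False)
up = nn.Linear(hidden, intermediate, bias=False)
down = nn.Linear(intermediate, hidden, bias=False)

x = torch.randn(2, 3, hidden)
y = down(F.silu(gate(x)) * up(x))  # act_fn is SiLU when hidden_act == "silu"
print(y.shape)                      # (2, 3, 8): projected back to hidden size
```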
|
class Emu3MLP(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 7
| 2
| 12
| 14
| 1
| 13
| 11
| 10
| 0
| 13
| 11
| 10
| 1
| 1
| 0
| 2
|
2,330
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3PreTrainedModel
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
@auto_docstring
class Emu3PreTrainedModel(PreTrainedModel):
config: Emu3Config
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['Emu3DecoderLayer']
_skip_keys_device_placement = ['past_key_values', 'causal_mask']
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_param_buffer_assignment = False
_supports_flex_attn = True
_supports_attention_backend = True
|
@auto_docstring
class Emu3PreTrainedModel(PreTrainedModel):
pass
| 2
| 0
| 12
| 0
| 12
| 0
| 6
| 0
| 1
| 1
| 1
| 3
| 1
| 0
| 1
| 1
| 28
| 1
| 27
| 15
| 25
| 0
| 23
| 15
| 21
| 6
| 1
| 2
| 6
|
2,331
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3RMSNorm
|
from ...integrations import use_kernel_forward_from_hub
import torch.nn as nn
import torch.nn.functional as F
import torch
@use_kernel_forward_from_hub('RMSNorm')
class Emu3RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
"""
Emu3RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.variance_epsilon}'
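A quick numerical check of the normalization above: RMSNorm rescales by the root-mean-square of the last dimension without subtracting the mean, and `weight` starts at ones so it is a no-op here:
import torch

norm = Emu3RMSNorm(hidden_size=4, eps=1e-6)
x = torch.randn(2, 4)
manual = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
assert torch.allclose(norm(x), manual, atol=1e-5)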
|
@use_kernel_forward_from_hub('RMSNorm')
class Emu3RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-06):
'''
Emu3RMSNorm is equivalent to T5LayerNorm
'''
pass
def forward(self, hidden_states):
pass
def extra_repr(self):
pass
| 5
| 1
| 5
| 0
| 4
| 1
| 1
| 0.23
| 1
| 2
| 0
| 0
| 3
| 2
| 3
| 13
| 18
| 2
| 13
| 8
| 9
| 3
| 13
| 8
| 9
| 1
| 1
| 0
| 3
|
2,332
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3RotaryEmbedding
|
import torch.nn as nn
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
import torch
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
import torch.nn.functional as F
class Emu3RotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: Emu3Config, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
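A standalone sketch of the frequency math in `forward` above: with the default rope type, inv_freq follows 1 / theta**(2i/d), and the cos/sin tables come from position * inv_freq duplicated across the head dimension (theta and sizes below are illustrative):
import torch

theta, head_dim, seq_len = 10000.0, 8, 4
inv_freq = 1.0 / (theta ** (torch.arange(0, head_dim, 2).float() / head_dim))
freqs = torch.outer(torch.arange(seq_len).float(), inv_freq)  # (seq_len, head_dim // 2)
emb = torch.cat((freqs, freqs), dim=-1)                       # (seq_len, head_dim)
cos, sin = emb.cos(), emb.sin()                               # what forward() returns, before attention_scaling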
|
class Emu3RotaryEmbedding(nn.Module):
def __init__(self, config: Emu3Config, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
| 5
| 0
| 18
| 2
| 13
| 5
| 3
| 0.35
| 1
| 4
| 1
| 0
| 3
| 7
| 3
| 13
| 59
| 8
| 40
| 21
| 35
| 14
| 38
| 20
| 34
| 3
| 1
| 1
| 8
|
2,333
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3TextModel
|
from ...cache_utils import Cache, DynamicCache
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
from ...masking_utils import create_causal_mask
import torch.nn.functional as F
from ...utils.generic import check_model_inputs
import torch.nn as nn
import torch
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
@auto_docstring
class Emu3TextModel(Emu3PreTrainedModel):
_can_record_outputs = {'hidden_states': Emu3DecoderLayer, 'attentions': Emu3Attention}
def __init__(self, config: Emu3Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([Emu3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.norm = Emu3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Emu3RotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
hidden_states = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values)
|
@auto_docstring
class Emu3TextModel(Emu3PreTrainedModel):
def __init__(self, config: Emu3Config):
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPast:
pass
| 6
| 0
| 40
| 5
| 30
| 6
| 6
| 0.22
| 1
| 16
| 10
| 0
| 5
| 7
| 6
| 7
| 257
| 34
| 184
| 65
| 146
| 40
| 89
| 34
| 82
| 21
| 2
| 2
| 37
|
2,334
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAE
|
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
import torch.nn.functional as F
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
import math
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
import torch.nn as nn
import torch
@auto_docstring(custom_intro='\n The VQ-VAE model used in Emu3 for encoding/decoding images into discrete tokens.\n This model follows the "Make-a-scene: Scene-based text-to-image generation with human priors" paper from\n [Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv\n Taigman](https://huggingface.co/papers/2203.13131).\n ')
class Emu3VQVAE(PreTrainedModel):
config: Emu3VQVAEConfig
base_model_prefix = 'emuvideovq'
main_input_name = 'pixel_values'
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_no_split_modules = ['Emu3VQVAETemporalResnetBlock', 'Emu3VQVAEAttentionBlock', 'Emu3VQVAEResnetBlock', 'Emu3VQVAEVectorQuantizer']
def _init_weights(self, module):
if isinstance(module, (nn.Conv2d, nn.Conv3d)):
nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
if module.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(module.bias, -bound, bound)
elif isinstance(module, nn.Linear):
nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
if module.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
nn.init.uniform_(module.bias, -bound, bound)
elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
nn.init.constant_(module.weight, 1.0)
nn.init.constant_(module.bias, 0.0)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_()
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def __init__(self, config: Emu3VQVAEConfig):
super().__init__(config)
self.config = config
self.encoder = Emu3VQVAEEncoder(config)
self.decoder = Emu3VQVAEDecoder(config)
self.quantize = Emu3VQVAEVectorQuantizer(config)
self.vision_spatial_factor = 2 ** (len(config.channel_multiplier) - 1)
self.quant_conv = Emu3VQVAEConv3d(config.latent_channels, config.embed_dim, kernel_size=(3, 1, 1), stride=(1, 1, 1))
self.post_quant_conv = Emu3VQVAEConv3d(config.embed_dim, config.latent_channels, kernel_size=(3, 1, 1), stride=(1, 1, 1))
self.spatial_scale_factor = 2 ** (len(config.channel_multiplier) - 1)
self.eval()
self.post_init()
def encode(self, pixel_values: torch.Tensor, image_sizes: torch.Tensor):
is_image = pixel_values.ndim == 4
if is_image:
temporal = self.config.temporal_downsample_factor
batch_size, channels, height, width = pixel_values.shape
pixel_values = pixel_values.unsqueeze(1).repeat(1, temporal, 1, 1, 1)
else:
batch_size, temporal, channels, height, width = pixel_values.shape
hidden_states = self.encoder(pixel_values)
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
hidden_states = self.quant_conv(hidden_states)
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
codes = self.quantize(hidden_states)
image_tokens = codes.squeeze(1) if is_image else codes
image_tokens = [single_image[:int(size[0] / self.vision_spatial_factor), :int(size[1] / self.vision_spatial_factor)] for single_image, size in zip(image_tokens, image_sizes)]
return image_tokens
def decode(self, hidden_states: torch.Tensor):
is_image = hidden_states.ndim == 3
if is_image:
hidden_states = hidden_states.unsqueeze(1)
batch_size, temporal, height, width = hidden_states.shape
quant = self.quantize.embedding(hidden_states.flatten())
channels = quant.shape[-1]
quant = quant.view(batch_size, temporal, height, width, channels).permute(0, 4, 1, 2, 3).contiguous()
post_quant = self.post_quant_conv(quant)
quant = quant.permute(0, 2, 1, 3, 4)
post_quant = post_quant.permute(0, 2, 1, 3, 4)
video = self.decoder(post_quant, quant)
video = video.reshape(batch_size, temporal * self.config.temporal_downsample_factor, self.config.out_channels, height * self.spatial_scale_factor, width * self.spatial_scale_factor)
return video[:, 0] if is_image else video
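A back-of-the-envelope sketch of the spatial bookkeeping in `encode`/`decode` above (the multiplier length is illustrative):
# With a channel_multiplier of length 4, vision_spatial_factor = 2 ** (4 - 1) = 8,
# so a 512 x 512 image becomes a 64 x 64 grid of discrete codes, and decode scales it back up.
height, width, factor = 512, 512, 2 ** (4 - 1)
print(height // factor, width // factor)  # 64 64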
|
@auto_docstring(custom_intro='\n The VQ-VAE model used in Emu3 for encoding/decoding images into discrete tokens.\n This model follows the "Make-a-scene: Scene-based text-to-image generation with human priors" paper from\n [ Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv\n Taigman](https://huggingface.co/papers/2203.13131).\n ')
class Emu3VQVAE(PreTrainedModel):
def _init_weights(self, module):
pass
def __init__(self, config: Emu3VQVAEConfig):
pass
def encode(self, pixel_values: torch.Tensor, image_sizes: torch.Tensor):
pass
def decode(self, hidden_states: torch.Tensor):
pass
| 6
| 0
| 21
| 4
| 17
| 1
| 3
| 0.04
| 1
| 9
| 5
| 0
| 4
| 8
| 4
| 4
| 97
| 18
| 77
| 31
| 72
| 3
| 56
| 31
| 51
| 6
| 1
| 2
| 13
|
2,335
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAEAttentionBlock
|
import torch.nn.functional as F
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
import torch.nn as nn
import torch
from typing import Callable, Optional, Union
class Emu3VQVAEAttentionBlock(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Emu3VQVAEConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale = self.head_dim ** (-0.5)
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.num_key_value_groups = 1
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scale, dropout=0.0 if not self.training else self.dropout)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights)
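A shape walkthrough of the multi-head reshape used above, in plain eager math (sizes are hypothetical):
import torch

batch, seq, heads, head_dim = 2, 5, 4, 8
q = torch.randn(batch, seq, heads * head_dim).view(batch, seq, heads, head_dim).transpose(1, 2)
k, v = torch.randn_like(q), torch.randn_like(q)   # (batch, heads, seq, head_dim)
weights = torch.softmax(q @ k.transpose(-1, -2) * head_dim ** -0.5, dim=-1)
out = (weights @ v).transpose(1, 2).reshape(batch, seq, heads * head_dim)
print(out.shape)  # torch.Size([2, 5, 32])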
|
class Emu3VQVAEAttentionBlock(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: Emu3VQVAEConfig):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''Input shape: Batch x Time x Channel'''
pass
| 3
| 2
| 35
| 6
| 28
| 1
| 4
| 0.05
| 1
| 4
| 0
| 0
| 2
| 10
| 2
| 12
| 73
| 14
| 56
| 25
| 48
| 3
| 40
| 20
| 37
| 5
| 1
| 2
| 7
|
2,336
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAEConv3d
|
import torch.nn as nn
import torch.nn.functional as F
import torch
class Emu3VQVAEConv3d(nn.Module):
def __init__(self, in_channel: int, out_channel: int, kernel_size: tuple[int], stride: tuple[int]):
super().__init__()
padding_sizes = [one_kernel - one_stride for one_kernel, one_stride in zip(kernel_size[1:], stride[1:])]
self.padding = ()
for pad_size in padding_sizes[::-1]:
self.padding += (pad_size // 2 + pad_size % 2, pad_size // 2)
self.padding += (2, 0)
self.conv = nn.Conv3d(in_channel, out_channel, kernel_size, stride=stride)
def forward(self, hidden_states: torch.Tensor):
hidden_states = F.pad(hidden_states, self.padding)
hidden_states = self.conv(hidden_states)
return hidden_states
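A sketch of the padding arithmetic above for the kernel/stride pair used by `quant_conv` (input sizes are illustrative):
import torch

conv = Emu3VQVAEConv3d(4, 4, kernel_size=(3, 1, 1), stride=(1, 1, 1))
print(conv.padding)  # (0, 0, 0, 0, 2, 0) in F.pad order: W_left, W_right, H_top, H_bottom, T_front, T_back
out = conv(torch.randn(1, 4, 5, 8, 8))
print(out.shape)     # torch.Size([1, 4, 5, 8, 8]) -- two frames of front padding preserve the temporal length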
|
class Emu3VQVAEConv3d(nn.Module):
def __init__(self, in_channel: int, out_channel: int, kernel_size: tuple[int], stride: tuple[int]):
pass
def forward(self, hidden_states: torch.Tensor):
pass
| 3
| 0
| 13
| 1
| 12
| 0
| 2
| 0
| 1
| 4
| 0
| 0
| 2
| 2
| 2
| 12
| 27
| 3
| 24
| 13
| 15
| 0
| 13
| 7
| 10
| 2
| 1
| 1
| 3
|
2,337
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAEDecoder
|
import torch
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
import torch.nn.functional as F
import math
import torch.nn as nn
class Emu3VQVAEDecoder(nn.Module):
def __init__(self, config: Emu3VQVAEConfig):
super().__init__()
quant_channels = config.embed_dim
block_in = config.base_channels * config.channel_multiplier[-1]
self.time_res_stack = nn.ModuleList()
for _ in range(config.num_res_blocks):
time_res_conv = Emu3VQVAETemporalResnetBlock(in_channels=config.latent_channels, out_channels=config.latent_channels)
self.time_res_stack.append(time_res_conv)
temp_upsample_block_num = int(math.log2(config.temporal_downsample_factor))
self.time_conv = nn.ModuleList()
for i in range(temp_upsample_block_num):
conv = Emu3VQVAETemporalUpsample(config.latent_channels, config.latent_channels)
self.time_conv.append(conv)
self.conv_in = nn.Conv2d(config.latent_channels, block_in, kernel_size=3, stride=1, padding=1)
self.middle_block = Emu3VQVAEMiddleBlock(config, block_in, quant_channels=quant_channels)
self.up_block = Emu3VQVAEUpBlock(config)
block_in = config.base_channels * config.channel_multiplier[0]
self.norm_out = Emu3VQVAESpatialNorm(quant_channels, block_in)
self.conv_out = nn.Conv2d(block_in, config.out_channels, kernel_size=3, stride=1, padding=1)
def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
hidden_quant_states = torch.cat((hidden_states, quant_states), dim=0)
hidden_quant_states = hidden_quant_states.permute(0, 2, 1, 3, 4)
for layer in self.time_res_stack:
hidden_quant_states = layer(hidden_quant_states)
for layer in self.time_conv:
hidden_quant_states = layer(hidden_quant_states)
hidden_quant_states *= torch.sigmoid(hidden_quant_states)
hidden_quant_states = hidden_quant_states.permute(0, 2, 1, 3, 4)
hidden_states, quant_states = torch.chunk(hidden_quant_states, 2, dim=0)
hidden_states = hidden_states.reshape(-1, *hidden_states.shape[2:])
quant_states = quant_states.reshape(-1, *quant_states.shape[2:])
hidden_states = self.conv_in(hidden_states)
hidden_states = self.middle_block(hidden_states, quant_states)
hidden_states = self.up_block(hidden_states, quant_states)
hidden_states = self.norm_out(hidden_states, quant_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv_out(hidden_states)
return hidden_states
|
class Emu3VQVAEDecoder(nn.Module):
def __init__(self, config: Emu3VQVAEConfig):
pass
def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
pass
| 3
| 0
| 33
| 6
| 26
| 1
| 3
| 0.04
| 1
| 10
| 6
| 0
| 2
| 7
| 2
| 12
| 68
| 13
| 53
| 19
| 50
| 2
| 39
| 19
| 36
| 3
| 1
| 1
| 6
|
2,338
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAEDownBlock
|
import torch.nn as nn
import torch.nn.functional as F
import torch
class Emu3VQVAEDownBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.num_resolutions = len(config.channel_multiplier)
self.num_res_blocks = config.num_res_blocks
base_channels = config.base_channels
channel_multiplier = config.channel_multiplier
in_channel_multiplier = (1,) + tuple(channel_multiplier)
self.in_channel_multiplier = in_channel_multiplier
self.down = nn.ModuleList()
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
attn_norms = nn.ModuleList()
block_in = base_channels * in_channel_multiplier[i_level]
block_out = base_channels * channel_multiplier[i_level]
for i_block in range(self.num_res_blocks):
block.append(Emu3VQVAEResnetBlock(in_channels=block_in, out_channels=block_out))
block_in = block_out
if config.attn_resolutions is not None and i_level in config.attn_resolutions:
attn.append(Emu3VQVAEAttentionBlock(config))
attn_norms.append(nn.GroupNorm(num_channels=block_in, num_groups=32, eps=1e-06, affine=True))
down = nn.Module()
down.block = block
down.attn = attn
down.attn_norms = attn_norms
if i_level != self.num_resolutions - 1:
down.downsample = Emu3VQVAEEncoderConvDownsample(block_in)
self.down.append(down)
def forward(self, hidden_states: torch.FloatTensor):
for i_level, blocks in enumerate(self.down):
for i_block in range(self.num_res_blocks):
hidden_states = blocks.block[i_block](hidden_states)
if len(blocks.attn) > 0:
residual = hidden_states
hidden_states = blocks.attn_norms[i_block](hidden_states)
batch_size, channels, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
hidden_states = blocks.attn[i_block](hidden_states)[0]
hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
hidden_states = residual + hidden_states
if i_level != self.num_resolutions - 1:
hidden_states = blocks.downsample(hidden_states)
return hidden_states
|
class Emu3VQVAEDownBlock(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.FloatTensor):
pass
| 3
| 0
| 28
| 4
| 24
| 0
| 5
| 0
| 1
| 7
| 3
| 0
| 2
| 4
| 2
| 12
| 57
| 8
| 49
| 22
| 46
| 0
| 44
| 22
| 41
| 5
| 1
| 3
| 10
|
2,339
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAEEncoder
|
import math
import torch.nn as nn
import torch
import torch.nn.functional as F
class Emu3VQVAEEncoder(nn.Module):
def __init__(self, config):
super().__init__()
base_channels = config.base_channels
in_channels = config.in_channels
double_latent = config.double_latent
latent_channels = config.latent_channels
channel_multiplier = config.channel_multiplier
out_channels = 2 * latent_channels if double_latent else latent_channels
block_in = base_channels * channel_multiplier[-1]
self.conv_in = torch.nn.Conv2d(in_channels, base_channels, kernel_size=3, stride=1, padding=1)
self.down_block = Emu3VQVAEDownBlock(config)
self.middle_block = Emu3VQVAEMiddleBlock(config, block_in)
self.norm_out = torch.nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-06, affine=True)
self.conv_out = torch.nn.Conv2d(block_in, out_channels, kernel_size=3, stride=1, padding=1)
temporal_down_blocks = int(math.log2(config.temporal_downsample_factor))
self.time_conv = nn.ModuleList()
self.time_res_stack = nn.ModuleList()
for i in range(temporal_down_blocks):
conv = Emu3VQVAETemporalDownsample(out_channels, out_channels)
self.time_conv.append(conv)
for _ in range(config.num_res_blocks):
time_res_conv = Emu3VQVAETemporalResnetBlock(in_channels=out_channels, out_channels=out_channels)
self.time_res_stack.append(time_res_conv)
def forward(self, pixel_values: torch.LongTensor):
temporal_dim = pixel_values.shape[1]
pixel_values = pixel_values.reshape(-1, *pixel_values.shape[2:])
hidden_states = self.conv_in(pixel_values)
hidden_states = self.down_block(hidden_states)
hidden_states = self.middle_block(hidden_states)
hidden_states = self.norm_out(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv_out(hidden_states)
hidden_states = hidden_states.reshape(-1, temporal_dim, *hidden_states.shape[1:])
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
for conv in self.time_conv:
hidden_states = conv(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
for layer in self.time_res_stack:
hidden_states = layer(hidden_states)
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
return hidden_states
|
class Emu3VQVAEEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, pixel_values: torch.LongTensor):
pass
| 3
| 0
| 33
| 7
| 25
| 2
| 4
| 0.06
| 1
| 7
| 4
| 0
| 2
| 7
| 2
| 12
| 68
| 14
| 51
| 26
| 48
| 3
| 42
| 26
| 39
| 4
| 1
| 1
| 7
|
2,340
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAEEncoderConvDownsample
|
import torch.nn as nn
import torch.nn.functional as F
class Emu3VQVAEEncoderConvDownsample(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
def forward(self, hidden_states):
hidden_states = F.pad(hidden_states, pad=(0, 1, 0, 1), mode='constant', value=0)
hidden_states = self.conv(hidden_states)
return hidden_states
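A quick check: the asymmetric (0, 1, 0, 1) pad plus the stride-2 convolution above halves even spatial sizes exactly:
import torch

down = Emu3VQVAEEncoderConvDownsample(3)
print(down(torch.randn(1, 3, 16, 16)).shape)  # torch.Size([1, 3, 8, 8])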
|
class Emu3VQVAEEncoderConvDownsample(nn.Module):
def __init__(self, in_channels):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 4
| 0
| 4
| 1
| 1
| 0.13
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 10
| 1
| 8
| 4
| 5
| 1
| 8
| 4
| 5
| 1
| 1
| 0
| 2
|
2,341
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAEEncoderConvUpsample
|
import torch.nn as nn
import torch.nn.functional as F
class Emu3VQVAEEncoderConvUpsample(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
def forward(self, hidden_states):
hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode='nearest')
hidden_states = self.conv(hidden_states)
return hidden_states
|
class Emu3VQVAEEncoderConvUpsample(nn.Module):
def __init__(self, in_channels):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 9
| 1
| 8
| 4
| 5
| 0
| 8
| 4
| 5
| 1
| 1
| 0
| 2
|
2,342
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAEGroupNorm
|
import torch.nn as nn
import torch.nn.functional as F
class Emu3VQVAEGroupNorm(nn.GroupNorm):
"""
    Same as the torch GroupNorm, with the only difference that this one accepts
    an optional kwarg `quant_states`, which is not used. This class makes it easier to
    use SpatialNorm or GroupNorm without conditionals
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, input, quant_states=None):
return F.group_norm(input, self.num_groups, self.weight, self.bias, self.eps)
|
class Emu3VQVAEGroupNorm(nn.GroupNorm):
'''
    Same as the torch GroupNorm, with the only difference that this one accepts
    an optional kwarg `quant_states`, which is not used. This class makes it easier to
    use SpatialNorm or GroupNorm without conditionals
'''
def __init__(self, **kwargs):
pass
def forward(self, input, quant_states=None):
pass
| 3
| 1
| 2
| 0
| 2
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 2
| 0
| 2
| 2
| 12
| 2
| 5
| 3
| 2
| 5
| 5
| 3
| 2
| 1
| 1
| 0
| 2
|
2,343
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAEMiddleBlock
|
import torch.nn as nn
import torch.nn.functional as F
from typing import Callable, Optional, Union
import torch
class Emu3VQVAEMiddleBlock(nn.Module):
def __init__(self, config, in_channels, quant_channels=None):
super().__init__()
self.block_1 = Emu3VQVAEResnetBlock(in_channels=in_channels, out_channels=in_channels, quant_channels=quant_channels)
self.attn_1 = Emu3VQVAEAttentionBlock(config)
if quant_channels is None:
self.attn_norm = Emu3VQVAEGroupNorm(num_channels=in_channels, num_groups=32, eps=1e-06, affine=True)
else:
self.attn_norm = Emu3VQVAESpatialNorm(quant_channels, in_channels)
self.block_2 = Emu3VQVAEResnetBlock(in_channels=in_channels, out_channels=in_channels, quant_channels=quant_channels)
def forward(self, hidden_states: torch.FloatTensor, quant_states: Optional[torch.FloatTensor]=None):
hidden_states = self.block_1(hidden_states, quant_states)
residual = hidden_states
hidden_states = self.attn_norm(hidden_states, quant_states)
batch_size, channels, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
hidden_states = self.attn_1(hidden_states)[0]
hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
hidden_states = residual + hidden_states
hidden_states = self.block_2(hidden_states, quant_states)
return hidden_states
|
class Emu3VQVAEMiddleBlock(nn.Module):
def __init__(self, config, in_channels, quant_channels=None):
pass
def forward(self, hidden_states: torch.FloatTensor, quant_states: Optional[torch.FloatTensor]=None):
pass
| 3
| 0
| 15
| 1
| 14
| 0
| 2
| 0
| 1
| 5
| 4
| 0
| 2
| 4
| 2
| 12
| 32
| 3
| 29
| 9
| 26
| 0
| 20
| 9
| 17
| 2
| 1
| 1
| 3
|
2,344
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAEResnetBlock
|
import torch
import torch.nn as nn
from typing import Callable, Optional, Union
import torch.nn.functional as F
class Emu3VQVAEResnetBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: Optional[int]=None, quant_channels: Optional[int]=None):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.quant_channels = quant_channels
if quant_channels is None:
self.norm1 = nn.GroupNorm(num_channels=in_channels, num_groups=32, eps=1e-06, affine=True)
self.norm2 = nn.GroupNorm(num_channels=out_channels, num_groups=32, eps=1e-06, affine=True)
else:
self.norm1 = Emu3VQVAESpatialNorm(quant_channels, in_channels)
self.norm2 = Emu3VQVAESpatialNorm(quant_channels, out_channels)
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
if self.in_channels != self.out_channels:
self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states: torch.Tensor, quant_channels: Optional[torch.Tensor]=None):
norm_args = () if self.quant_channels is None else (quant_channels,)
residual = hidden_states
hidden_states = self.norm1(hidden_states, *norm_args)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv1(hidden_states)
hidden_states = self.norm2(hidden_states, *norm_args)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.in_channels != self.out_channels:
residual = self.nin_shortcut(residual)
return residual + hidden_states
|
class Emu3VQVAEResnetBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: Optional[int]=None, quant_channels: Optional[int]=None):
pass
def forward(self, hidden_states: torch.Tensor, quant_channels: Optional[torch.Tensor]=None):
pass
| 3
| 0
| 30
| 4
| 26
| 0
| 4
| 0
| 1
| 4
| 1
| 0
| 2
| 8
| 2
| 12
| 61
| 9
| 52
| 18
| 44
| 0
| 28
| 13
| 25
| 4
| 1
| 1
| 7
|
2,345
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAESpatialNorm
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Emu3VQVAESpatialNorm(nn.Module):
def __init__(self, in_channels: int, out_channels: int):
super().__init__()
self.norm_layer = nn.GroupNorm(num_channels=out_channels, num_groups=32, eps=1e-06, affine=True)
self.conv_y = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
self.conv_b = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
quant_states = F.interpolate(quant_states, size=hidden_states.shape[-2:], mode='nearest')
hidden_states = self.norm_layer(hidden_states)
hidden_states = hidden_states * self.conv_y(quant_states) + self.conv_b(quant_states)
return hidden_states
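A shape sketch of the modulation above, out = GroupNorm(x) * conv_y(z) + conv_b(z), where the quantized states z are first resized to x's spatial size (channel counts below are hypothetical, but out_channels must stay divisible by the 32 groups):
import torch

sn = Emu3VQVAESpatialNorm(in_channels=4, out_channels=32)
x, z = torch.randn(1, 32, 16, 16), torch.randn(1, 4, 8, 8)
print(sn(x, z).shape)  # torch.Size([1, 32, 16, 16])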
|
class Emu3VQVAESpatialNorm(nn.Module):
def __init__(self, in_channels: int, out_channels: int):
pass
def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
pass
| 3
| 0
| 16
| 1
| 16
| 0
| 1
| 0
| 1
| 3
| 0
| 0
| 2
| 3
| 2
| 12
| 34
| 2
| 32
| 10
| 25
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
2,346
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAETemporalDownsample
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Emu3VQVAETemporalDownsample(nn.Module):
def __init__(self, in_channel: int, out_channel: int):
super().__init__()
self.conv = Emu3VQVAEConv3d(in_channel, out_channel, kernel_size=(4, 3, 3), stride=(2, 1, 1))
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.conv(hidden_states)
return hidden_states
|
class Emu3VQVAETemporalDownsample(nn.Module):
def __init__(self, in_channel: int, out_channel: int):
pass
def forward(self, hidden_states: torch.Tensor):
pass
| 3
| 0
| 8
| 0
| 8
| 0
| 1
| 0
| 1
| 4
| 1
| 0
| 2
| 1
| 2
| 12
| 17
| 1
| 16
| 8
| 9
| 0
| 7
| 4
| 4
| 1
| 1
| 0
| 2
|
2,347
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAETemporalResnetBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Emu3VQVAETemporalResnetBlock(nn.Module):
def __init__(self, in_channels, out_channels=None):
super().__init__()
self.in_channels = in_channels
self.out_channels = in_channels if out_channels is None else out_channels
self.norm1 = nn.BatchNorm3d(in_channels)
        self.conv1 = Emu3VQVAEConv3d(in_channels, self.out_channels, kernel_size=(3, 3, 3), stride=(1, 1, 1))
        self.norm2 = nn.BatchNorm3d(self.out_channels)
        self.conv2 = Emu3VQVAEConv3d(self.out_channels, self.out_channels, kernel_size=(3, 3, 3), stride=(1, 1, 1))
        if self.in_channels != self.out_channels:
            self.nin_shortcut = nn.Conv3d(in_channels, self.out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states):
residual = hidden_states
hidden_states = self.norm1(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv1(hidden_states)
hidden_states = self.norm2(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.in_channels != self.out_channels:
residual = self.nin_shortcut(residual)
return residual + hidden_states
|
class Emu3VQVAETemporalResnetBlock(nn.Module):
def __init__(self, in_channels, out_channels=None):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 23
| 2
| 21
| 0
| 3
| 0
| 1
| 2
| 1
| 0
| 2
| 7
| 2
| 12
| 47
| 5
| 42
| 15
| 35
| 0
| 22
| 11
| 19
| 3
| 1
| 1
| 5
|
2,348
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAETemporalUpsample
|
import torch.nn as nn
import torch.nn.functional as F
import torch
class Emu3VQVAETemporalUpsample(nn.Module):
def __init__(self, in_channel: int, out_channel: int):
super().__init__()
self.conv = Emu3VQVAEConv3d(in_channel, out_channel, kernel_size=(3, 3, 3), stride=(1, 1, 1))
def forward(self, hidden_states: torch.Tensor):
batch_size, channels, temporal, height, width = hidden_states.shape
hidden_states = hidden_states.permute(0, 1, 3, 4, 2).contiguous().view(batch_size, -1, temporal)
hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode='nearest')
hidden_states = hidden_states.view(batch_size, channels, height, width, -1).permute(0, 1, 4, 2, 3).contiguous()
hidden_states = self.conv(hidden_states)
return hidden_states
|
class Emu3VQVAETemporalUpsample(nn.Module):
def __init__(self, in_channel: int, out_channel: int):
pass
def forward(self, hidden_states: torch.Tensor):
pass
| 3
| 0
| 10
| 0
| 10
| 0
| 1
| 0
| 1
| 4
| 1
| 0
| 2
| 1
| 2
| 12
| 21
| 1
| 20
| 9
| 13
| 0
| 11
| 5
| 8
| 1
| 1
| 0
| 2
|
2,349
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAEUpBlock
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Emu3VQVAEUpBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.num_resolutions = len(config.channel_multiplier)
self.num_res_blocks = config.num_res_blocks
quant_channels = config.embed_dim
block_in = config.base_channels * config.channel_multiplier[-1]
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
attn_norms = nn.ModuleList()
block_out = config.base_channels * config.channel_multiplier[i_level]
for i_block in range(self.num_res_blocks + 1):
block.append(Emu3VQVAEResnetBlock(in_channels=block_in, out_channels=block_out, quant_channels=quant_channels))
block_in = block_out
if i_level in config.attn_resolutions:
attn.append(Emu3VQVAEAttentionBlock(config))
attn_norms.append(Emu3VQVAESpatialNorm(quant_channels, block_in))
up = nn.Module()
up.block = block
up.attn = attn
up.attn_norms = attn_norms
if i_level != 0:
up.upsample = Emu3VQVAEEncoderConvUpsample(block_in)
self.up.insert(0, up)
def forward(self, hidden_states: torch.FloatTensor, quant_states: torch.FloatTensor):
for i_level, blocks in enumerate(self.up[::-1]):
for i_block in range(self.num_res_blocks + 1):
hidden_states = blocks.block[i_block](hidden_states, quant_states)
if len(blocks.attn) > 0:
residual = hidden_states
hidden_states = blocks.attn_norms[i_block](hidden_states, quant_states)
batch_size, channels, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
hidden_states = blocks.attn[i_block](hidden_states)[0]
hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
hidden_states = residual + hidden_states
if i_level != len(self.up) - 1:
hidden_states = blocks.upsample(hidden_states)
return hidden_states
|
class Emu3VQVAEUpBlock(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.FloatTensor, quant_states: torch.FloatTensor):
pass
| 3
| 0
| 27
| 4
| 23
| 0
| 5
| 0
| 1
| 8
| 4
| 0
| 2
| 3
| 2
| 12
| 56
| 9
| 47
| 19
| 44
| 0
| 41
| 19
| 38
| 5
| 1
| 3
| 10
|
2,350
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modeling_emu3.py
|
transformers.models.emu3.modeling_emu3.Emu3VQVAEVectorQuantizer
|
import torch.nn as nn
import torch.nn.functional as F
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
import torch
class Emu3VQVAEVectorQuantizer(nn.Module):
"""
A module for vector quantization using learned embedding vectors.
    This module implements a quantization process similar to the one described in
the VQ-VAE (Vector Quantized Variational AutoEncoder) paper. It quantizes continuous
input vectors into discrete codebook vectors, which are learned during training.
Current implementation improves over previous ones by avoiding costly matrix multiplications
and allowing for post-hoc remapping of indices.
"""
def __init__(self, config: Emu3VQVAEConfig):
super().__init__()
self.embedding = nn.Embedding(config.codebook_size, config.embed_dim)
self.embedding.weight.data.uniform_(-1.0 / config.codebook_size, 1.0 / config.codebook_size)
def forward(self, hidden_state: torch.Tensor):
batch_size, temporal, channels, height, width = hidden_state.shape
hidden_state = hidden_state.permute(0, 1, 3, 4, 2).contiguous()
hidden_state_flattened = hidden_state.view(-1, channels)
hidden_state_sum = torch.sum(hidden_state_flattened ** 2, dim=1, keepdim=True)
embedding_sum = torch.sum(self.embedding.weight ** 2, dim=1)
distances = 2 * torch.matmul(hidden_state_flattened, self.embedding.weight.transpose(0, 1))
distances = hidden_state_sum + embedding_sum - distances
min_encoding_indices = torch.argmin(distances, dim=1)
min_encoding_indices = min_encoding_indices.view(batch_size, temporal, height, width)
return min_encoding_indices
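A numerical check of the distance expansion used above: ||x - e||^2 = ||x||^2 + ||e||^2 - 2 x . e, so taking the argmin over the expanded form picks the nearest codebook entry without materializing explicit differences:
import torch

x = torch.randn(5, 3)          # flattened hidden states
codebook = torch.randn(7, 3)   # stand-in for self.embedding.weight
expanded = (x ** 2).sum(1, keepdim=True) + (codebook ** 2).sum(1) - 2 * x @ codebook.T
direct = torch.cdist(x, codebook) ** 2
assert torch.allclose(expanded, direct, atol=1e-4)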
|
class Emu3VQVAEVectorQuantizer(nn.Module):
'''
A module for vector quantization using learned embedding vectors.
    This module implements a quantization process similar to the one described in
the VQ-VAE (Vector Quantized Variational AutoEncoder) paper. It quantizes continuous
input vectors into discrete codebook vectors, which are learned during training.
Current implementation improves over previous ones by avoiding costly matrix multiplications
and allowing for post-hoc remapping of indices.
'''
def __init__(self, config: Emu3VQVAEConfig):
pass
def forward(self, hidden_state: torch.Tensor):
pass
| 3
| 1
| 10
| 2
| 8
| 1
| 1
| 0.63
| 1
| 3
| 1
| 0
| 2
| 1
| 2
| 12
| 32
| 6
| 16
| 10
| 13
| 10
| 16
| 10
| 13
| 1
| 1
| 0
| 2
|
2,351
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3DecoderLayer
|
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaModel, TransformersKwargs
import torch
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
from ...processing_utils import Unpack
import torch.nn.functional as F
from ...cache_utils import Cache
from typing import Optional, Union
from ...utils.deprecation import deprecate_kwarg
import torch.nn as nn
class Emu3DecoderLayer(LlamaDecoderLayer):
def __init__(self, config: Emu3Config, layer_idx: int):
super().__init__(config, layer_idx)
self.dropout = nn.Dropout(config.attention_dropout)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)
hidden_states = residual + self.dropout(hidden_states)
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.dropout(hidden_states)
return hidden_states
|
class Emu3DecoderLayer(LlamaDecoderLayer):
def __init__(self, config: Emu3Config, layer_idx: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
| 4
| 0
| 32
| 3
| 19
| 11
| 2
| 0.55
| 1
| 6
| 2
| 0
| 2
| 1
| 2
| 14
| 66
| 7
| 38
| 18
| 24
| 21
| 17
| 7
| 14
| 2
| 2
| 1
| 3
|
2,352
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3ForCausalLM
|
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaModel, TransformersKwargs
from ...generation import GenerationMixin
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
class Emu3ForCausalLM(LlamaForCausalLM, Emu3PreTrainedModel, GenerationMixin):
config: Emu3TextConfig
def __init__(self, config):
super().__init__(config)
self.model = Emu3TextModel(config)
    def forward(self, **super_kwargs):
"""
Example:
```python
        >>> from transformers import Emu3Processor, Emu3ForCausalLM
>>> import torch
>>> import requests
>>> from PIL import Image
>>> model = Emu3ForCausalLM.from_pretrained("BAAI/Emu3-Chat-hf", dtype=torch.bfloat16)
>>> processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf")
>>> inputs = processor(text=["Can you write me a poem about winter."], return_tensors="pt").to(model.device)
>>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
>>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```"""
        super().forward(**super_kwargs)
|
class Emu3ForCausalLM(LlamaForCausalLM, Emu3PreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
    def forward(self, **super_kwargs):
'''
Example:
```python
        >>> from transformers import Emu3Processor, Emu3ForCausalLM
>>> import torch
>>> import requests
>>> from PIL import Image
>>> model = Emu3ForCausalLM.from_pretrained("BAAI/Emu3-Chat-hf", dtype=torch.bfloat16)
>>> processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf")
>>> inputs = processor(text=["Can you write me a poem about winter."], return_tensors="pt").to(model.device)
>>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
>>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```'''
pass
| 3
| 1
| 19
| 4
| 3
| 13
| 1
| 2.5
| 3
| 2
| 1
| 0
| 2
| 1
| 2
| 146
| 44
| 9
| 10
| 6
| 4
| 25
| 7
| 5
| 4
| 1
| 4
| 0
| 2
|
2,353
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3ForConditionalGeneration
|
import torch.nn.functional as F
from ...processing_utils import Unpack
from typing import Optional, Union
from ...utils import auto_docstring, can_return_tuple, logging
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaModel, TransformersKwargs
import torch.nn as nn
import torch
from ...modeling_outputs import CausalLMOutputWithPast
class Emu3ForConditionalGeneration(Emu3PreTrainedModel, GenerationMixin):
base_model_prefix = ''
_tied_weights_keys = ['lm_head.weight']
_checkpoint_conversion_mapping = {'^text_model.model': 'model.text_model', '^vqmodel': 'model.vqmodel', '^text_model.lm_head': 'lm_head'}
def __init__(self, config):
super().__init__(config)
self.model = Emu3Model(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def set_decoder(self, decoder):
self.model.set_decoder(decoder)
def get_decoder(self):
return self.model.get_decoder()
@property
def text_model(self):
return self.model.text_model
@property
def vqmodel(self):
return self.model.vqmodel
@property
def vocabulary_mapping(self):
return self.model.vocabulary_mapping
def decode_image_tokens(self, **kwargs):
return self.model.decode_image_tokens(**kwargs)
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_sizes: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithPast]:
"""
image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`):
The sizes of the images in the batch, being (height, width) for each image. Image sizes can be obtained using
            [`AutoImageProcessor`]. See [`Emu3ImageProcessor.__call__`] for details ([`Emu3Processor`] uses
[`Emu3ImageProcessor`] for processing images).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import Emu3Processor, Emu3ForConditionalGeneration
>>> import torch
>>> import requests
>>> from PIL import Image
>>> model = Emu3ForConditionalGeneration.from_pretrained("BAAI/Emu3-Chat-hf", dtype=torch.bfloat16)
>>> processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf")
>>> conversation = [
... {
... "role": "system",
... "content": [
... {"type": "text", "text": "You are a helpful assistant."},
... ],
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "Please describe the image."},
... ],
... },
... ]
>>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
>>> image = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
>>> inputs = processor(images=[image], text=[prompt], return_tensors="pt").to(model.device, torch.bfloat16)
>>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
>>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```"""
outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs)
hidden_states = outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)
return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, pixel_values=None, **kwargs):
model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, cache_position=cache_position, position_ids=position_ids, pixel_values=pixel_values, use_cache=use_cache, **kwargs)
if cache_position[0] != 0:
model_inputs['pixel_values'] = None
return model_inputs
|
class Emu3ForConditionalGeneration(Emu3PreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def get_output_embeddings(self) -> nn.Module:
pass
def set_decoder(self, decoder):
pass
def get_decoder(self):
pass
@property
def text_model(self):
pass
@property
def vqmodel(self):
pass
@property
def vocabulary_mapping(self):
pass
def decode_image_tokens(self, **kwargs):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, image_sizes: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithPast]:
'''
image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`):
The sizes of the images in the batch, being (height, width) for each image. Image sizes can be obtained using
            [`AutoImageProcessor`]. See [`Emu3ImageProcessor.__call__`] for details ([`Emu3Processor`] uses
[`Emu3ImageProcessor`] for processing images).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import Emu3Processor, Emu3ForConditionalGeneration
>>> import torch
>>> import requests
>>> from PIL import Image
>>> model = Emu3ForConditionalGeneration.from_pretrained("BAAI/Emu3-Chat-hf", dtype=torch.bfloat16)
>>> processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf")
>>> conversation = [
... {
... "role": "system",
... "content": [
... {"type": "text", "text": "You are a helpful assistant."},
... ],
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "Please describe the image."},
... ],
... },
... ]
>>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
>>> image = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
>>> inputs = processor(images=[image], text=[prompt], return_tensors="pt").to(model.device, torch.bfloat16)
>>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
>>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```'''
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, pixel_values=None, **kwargs):
pass
| 18
| 1
| 25
| 3
| 12
| 11
| 2
| 0.88
| 2
| 9
| 4
| 0
| 6
| 3
| 6
| 141
| 163
| 23
| 75
| 38
| 49
| 66
| 38
| 20
| 31
| 7
| 4
| 1
| 12
|
2,354
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3ImageVocabularyMapping
|
import torch.nn.functional as F
from functools import cached_property
import torch
import torch.nn as nn
class Emu3ImageVocabularyMapping:
"""
A class for mapping discrete image tokens from VQGAN to BPE tokens.
"""
def __init__(self, vocab_map):
self.vocab_map = vocab_map
self.eol_token_id = vocab_map.get('<|extra_200|>')
self.image_token_id = vocab_map.get('<image>')
@cached_property
def image_tokens(self):
return sorted([val for name, val in self.vocab_map.items() if name.startswith('<|visual token')])
@cached_property
def image_tokens_str(self):
return sorted([name for name, val in self.vocab_map.items() if name.startswith('<|visual token')])
@cached_property
def img2bpe(self):
return {int(token[-8:-2]): self.vocab_map[token] for token in self.image_tokens_str}
@cached_property
def bpe2img(self):
return {v: k for k, v in self.img2bpe.items()}
@cached_property
def bpe2img_mapping_tensor(self):
mapping = torch.zeros(max(self.bpe2img.keys()) + 1, dtype=torch.int)
for k, v in self.bpe2img.items():
mapping[k] = v
return mapping
@cached_property
def img2bpe_mapping_tensor(self):
mapping = torch.zeros(max(self.img2bpe.keys()) + 1, dtype=torch.int)
for k, v in self.img2bpe.items():
mapping[k] = v
return mapping
    def convert_img2bpe(self, img_batch: torch.Tensor) -> torch.Tensor:
device = img_batch.device
eol_row = torch.ones((img_batch.shape[0], 1), dtype=torch.int) * self.eol_token_id
img_tokens = self.img2bpe_mapping_tensor[img_batch.to('cpu')]
img_tokens = torch.cat([img_tokens, eol_row], dim=-1)
return img_tokens.to(device)
def convert_bpe2img(self, img_batch: torch.Tensor) -> torch.Tensor:
device = img_batch.device
img_batch = img_batch[..., :-1]
img_tokens = self.bpe2img_mapping_tensor[img_batch.to('cpu')]
return img_tokens.to(device)
|
class Emu3ImageVocabularyMapping:
'''
A class for mapping discrete image tokens from VQGAN to BPE tokens.
'''
def __init__(self, vocab_map):
pass
@cached_property
def image_tokens(self):
pass
@cached_property
def image_tokens_str(self):
pass
@cached_property
def img2bpe(self):
pass
@cached_property
def bpe2img(self):
pass
@cached_property
def bpe2img_mapping_tensor(self):
pass
@cached_property
def img2bpe_mapping_tensor(self):
pass
    def convert_img2bpe(self, img_batch: torch.Tensor) -> torch.Tensor:
pass
def convert_bpe2img(self, img_batch: torch.Tensor) -> torch.Tensor:
pass
| 16
| 1
| 4
| 0
| 4
| 0
| 1
| 0.1
| 0
| 2
| 0
| 0
| 9
| 3
| 9
| 9
| 52
| 9
| 40
| 30
| 24
| 4
| 34
| 22
| 24
| 2
| 0
| 1
| 11
|
2,355
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3PreTrainedModel
|
from ..chameleon.modeling_chameleon import ChameleonPreTrainedModel, ChameleonVQVAEEncoderConvDownsample
class Emu3PreTrainedModel(ChameleonPreTrainedModel, Emu3VQVAE):
_no_split_modules = ['Emu3DecoderLayer']
_supports_flex_attn = True
_supports_attention_backend = True
|
class Emu3PreTrainedModel(ChameleonPreTrainedModel, Emu3VQVAE):
pass
| 1
| 0
| 12
| 0
| 12
| 0
| 6
| 0
| 2
| 0
| 0
| 3
| 1
| 0
| 1
| 135
| 18
| 1
| 17
| 5
| 15
| 0
| 13
| 5
| 11
| 6
| 3
| 2
| 6
|
2,356
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3TextModel
|
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
from ..llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaModel, TransformersKwargs
import torch.nn as nn
class Emu3TextModel(LlamaModel, Emu3PreTrainedModel):
_can_record_outputs = {'hidden_states': Emu3DecoderLayer, 'attentions': Emu3Attention}
def __init__(self, config: Emu3Config):
super().__init__(config)
self.layers = nn.ModuleList([Emu3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
|
class Emu3TextModel(LlamaModel, Emu3PreTrainedModel):
def __init__(self, config: Emu3Config):
pass
| 2
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 2
| 4
| 2
| 0
| 2
| 1
| 2
| 144
| 10
| 1
| 9
| 5
| 5
| 0
| 6
| 4
| 3
| 1
| 4
| 0
| 2
|
2,357
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAE
|
import math
from ...utils import auto_docstring, can_return_tuple, logging
from ...modeling_utils import PreTrainedModel
import torch
import torch.nn as nn
import torch.nn.functional as F
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
@auto_docstring(custom_intro='\n The VQ-VAE model used in Emu3 for encoding/decoding images into discrete tokens.\n This model follows the "Make-a-scene: Scene-based text-to-image generation with human priors" paper from\n [ Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv\n Taigman](https://huggingface.co/papers/2203.13131).\n ')
class Emu3VQVAE(PreTrainedModel):
config: Emu3VQVAEConfig
base_model_prefix = 'emuvideovq'
main_input_name = 'pixel_values'
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_no_split_modules = ['Emu3VQVAETemporalResnetBlock', 'Emu3VQVAEAttentionBlock', 'Emu3VQVAEResnetBlock', 'Emu3VQVAEVectorQuantizer']
def _init_weights(self, module):
if isinstance(module, (nn.Conv2d, nn.Conv3d)):
nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
if module.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(module.bias, -bound, bound)
elif isinstance(module, nn.Linear):
nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
if module.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
nn.init.uniform_(module.bias, -bound, bound)
elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
nn.init.constant_(module.weight, 1.0)
nn.init.constant_(module.bias, 0.0)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_()
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def __init__(self, config: Emu3VQVAEConfig):
super().__init__(config)
self.config = config
self.encoder = Emu3VQVAEEncoder(config)
self.decoder = Emu3VQVAEDecoder(config)
self.quantize = Emu3VQVAEVectorQuantizer(config)
self.vision_spatial_factor = 2 ** (len(config.channel_multiplier) - 1)
self.quant_conv = Emu3VQVAEConv3d(config.latent_channels, config.embed_dim, kernel_size=(3, 1, 1), stride=(1, 1, 1))
self.post_quant_conv = Emu3VQVAEConv3d(config.embed_dim, config.latent_channels, kernel_size=(3, 1, 1), stride=(1, 1, 1))
self.spatial_scale_factor = 2 ** (len(config.channel_multiplier) - 1)
self.eval()
self.post_init()
def encode(self, pixel_values: torch.Tensor, image_sizes: torch.Tensor):
is_image = pixel_values.ndim == 4
if is_image:
temporal = self.config.temporal_downsample_factor
batch_size, channels, height, width = pixel_values.shape
pixel_values = pixel_values.unsqueeze(1).repeat(1, temporal, 1, 1, 1)
else:
batch_size, temporal, channels, height, width = pixel_values.shape
hidden_states = self.encoder(pixel_values)
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
hidden_states = self.quant_conv(hidden_states)
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
codes = self.quantize(hidden_states)
image_tokens = codes.squeeze(1) if is_image else codes
image_tokens = [single_image[:int(size[0] / self.vision_spatial_factor), :int(size[1] / self.vision_spatial_factor)] for single_image, size in zip(image_tokens, image_sizes)]
return image_tokens
def decode(self, hidden_states: torch.Tensor):
is_image = hidden_states.ndim == 3
if is_image:
hidden_states = hidden_states.unsqueeze(1)
batch_size, temporal, height, width = hidden_states.shape
quant = self.quantize.embedding(hidden_states.flatten())
channels = quant.shape[-1]
quant = quant.view(batch_size, temporal, height, width, channels).permute(0, 4, 1, 2, 3).contiguous()
post_quant = self.post_quant_conv(quant)
quant = quant.permute(0, 2, 1, 3, 4)
post_quant = post_quant.permute(0, 2, 1, 3, 4)
video = self.decoder(post_quant, quant)
video = video.reshape(batch_size, temporal * self.config.temporal_downsample_factor, self.config.out_channels, height * self.spatial_scale_factor, width * self.spatial_scale_factor)
return video[:, 0] if is_image else video
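# A shape-level sketch of the encode/decode round trip with random weights
# (no checkpoint). The imports assume the generated configuration_emu3 and
# modeling_emu3 modules that this modular file produces; with the default
# config the spatial factor is 8, so a 64x64 image maps to an 8x8 code grid.
import torch
from transformers.models.emu3.configuration_emu3 import Emu3VQVAEConfig
from transformers.models.emu3.modeling_emu3 import Emu3VQVAE

vqvae = Emu3VQVAE(Emu3VQVAEConfig())      # __init__ already puts the model in eval mode
pixel_values = torch.randn(1, 3, 64, 64)  # one RGB image
image_sizes = torch.tensor([[64, 64]])    # (height, width) per image
with torch.no_grad():
    codes = vqvae.encode(pixel_values, image_sizes)  # list of (8, 8) LongTensors
    recon = vqvae.decode(codes[0].unsqueeze(0))      # (1, 3, 64, 64)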
|
@auto_docstring(custom_intro='\n The VQ-VAE model used in Emu3 for encoding/decoding images into discrete tokens.\n This model follows the "Make-a-scene: Scene-based text-to-image generation with human priors" paper from\n [ Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv\n Taigman](https://huggingface.co/papers/2203.13131).\n ')
class Emu3VQVAE(PreTrainedModel):
def _init_weights(self, module):
pass
def __init__(self, config: Emu3VQVAEConfig):
pass
def encode(self, pixel_values: torch.Tensor, image_sizes: torch.Tensor):
pass
def decode(self, hidden_states: torch.Tensor):
pass
| 6
| 0
| 21
| 4
| 17
| 1
| 3
| 0.04
| 1
| 9
| 5
| 1
| 4
| 8
| 4
| 133
| 97
| 18
| 77
| 31
| 72
| 3
| 56
| 31
| 51
| 6
| 2
| 2
| 13
|
2,358
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAEAttentionBlock
|
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
from ..siglip.modeling_siglip import SiglipAttention
class Emu3VQVAEAttentionBlock(SiglipAttention):
def __init__(self, config: Emu3VQVAEConfig):
super().__init__(config)
self.num_key_value_groups = 1
|
class Emu3VQVAEAttentionBlock(SiglipAttention):
def __init__(self, config: Emu3VQVAEConfig):
pass
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
2,359
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAEConv3d
|
import torch.nn.functional as F
import torch.nn as nn
import torch
class Emu3VQVAEConv3d(nn.Module):
def __init__(self, in_channel: int, out_channel: int, kernel_size: tuple[int], stride: tuple[int]):
super().__init__()
padding_sizes = [one_kernel - one_stride for one_kernel, one_stride in zip(kernel_size[1:], stride[1:])]
self.padding = ()
for pad_size in padding_sizes[::-1]:
self.padding += (pad_size // 2 + pad_size % 2, pad_size // 2)
self.padding += (2, 0)
self.conv = nn.Conv3d(in_channel, out_channel, kernel_size, stride=stride)
def forward(self, hidden_states: torch.Tensor):
hidden_states = F.pad(hidden_states, self.padding)
hidden_states = self.conv(hidden_states)
return hidden_states
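# Padding sketch, assuming the Emu3VQVAEConv3d class above is in scope:
# height/width are padded symmetrically to preserve size, while the time
# axis gets all of its padding on the left, making the temporal convolution
# causal.
import torch

conv = Emu3VQVAEConv3d(8, 8, kernel_size=(3, 3, 3), stride=(1, 1, 1))
x = torch.randn(1, 8, 5, 16, 16)   # (batch, channels, time, height, width)
print(conv.padding)                # (1, 1, 1, 1, 2, 0): W, H symmetric; T left-only
print(conv(x).shape)               # torch.Size([1, 8, 5, 16, 16])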
|
class Emu3VQVAEConv3d(nn.Module):
def __init__(self, in_channel: int, out_channel: int, kernel_size: tuple[int], stride: tuple[int]):
pass
def forward(self, hidden_states: torch.Tensor):
pass
| 3
| 0
| 13
| 1
| 12
| 0
| 2
| 0
| 1
| 4
| 0
| 0
| 2
| 2
| 2
| 12
| 27
| 3
| 24
| 13
| 15
| 0
| 13
| 7
| 10
| 2
| 1
| 1
| 3
|
2,360
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAEDecoder
|
import torch.nn as nn
import torch.nn.functional as F
import math
import torch
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
class Emu3VQVAEDecoder(nn.Module):
def __init__(self, config: Emu3VQVAEConfig):
super().__init__()
quant_channels = config.embed_dim
block_in = config.base_channels * config.channel_multiplier[-1]
self.time_res_stack = nn.ModuleList()
for _ in range(config.num_res_blocks):
time_res_conv = Emu3VQVAETemporalResnetBlock(in_channels=config.latent_channels, out_channels=config.latent_channels)
self.time_res_stack.append(time_res_conv)
temp_upsample_block_num = int(math.log2(config.temporal_downsample_factor))
self.time_conv = nn.ModuleList()
for i in range(temp_upsample_block_num):
conv = Emu3VQVAETemporalUpsample(config.latent_channels, config.latent_channels)
self.time_conv.append(conv)
self.conv_in = nn.Conv2d(config.latent_channels, block_in, kernel_size=3, stride=1, padding=1)
self.middle_block = Emu3VQVAEMiddleBlock(config, block_in, quant_channels=quant_channels)
self.up_block = Emu3VQVAEUpBlock(config)
block_in = config.base_channels * config.channel_multiplier[0]
self.norm_out = Emu3VQVAESpatialNorm(quant_channels, block_in)
self.conv_out = nn.Conv2d(block_in, config.out_channels, kernel_size=3, stride=1, padding=1)
def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
hidden_quant_states = torch.cat((hidden_states, quant_states), dim=0)
hidden_quant_states = hidden_quant_states.permute(0, 2, 1, 3, 4)
for layer in self.time_res_stack:
hidden_quant_states = layer(hidden_quant_states)
for layer in self.time_conv:
hidden_quant_states = layer(hidden_quant_states)
hidden_quant_states *= torch.sigmoid(hidden_quant_states)
hidden_quant_states = hidden_quant_states.permute(0, 2, 1, 3, 4)
hidden_states, quant_states = torch.chunk(hidden_quant_states, 2, dim=0)
hidden_states = hidden_states.reshape(-1, *hidden_states.shape[2:])
quant_states = quant_states.reshape(-1, *quant_states.shape[2:])
hidden_states = self.conv_in(hidden_states)
hidden_states = self.middle_block(hidden_states, quant_states)
hidden_states = self.up_block(hidden_states, quant_states)
hidden_states = self.norm_out(hidden_states, quant_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv_out(hidden_states)
return hidden_states
|
class Emu3VQVAEDecoder(nn.Module):
def __init__(self, config: Emu3VQVAEConfig):
pass
def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
pass
| 3
| 0
| 33
| 6
| 26
| 1
| 3
| 0.04
| 1
| 10
| 6
| 0
| 2
| 7
| 2
| 12
| 68
| 13
| 53
| 19
| 50
| 2
| 39
| 19
| 36
| 3
| 1
| 1
| 6
|
2,361
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAEDownBlock
|
import torch.nn.functional as F
import torch.nn as nn
import torch
class Emu3VQVAEDownBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.num_resolutions = len(config.channel_multiplier)
self.num_res_blocks = config.num_res_blocks
base_channels = config.base_channels
channel_multiplier = config.channel_multiplier
in_channel_multiplier = (1,) + tuple(channel_multiplier)
self.in_channel_multiplier = in_channel_multiplier
self.down = nn.ModuleList()
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
attn_norms = nn.ModuleList()
block_in = base_channels * in_channel_multiplier[i_level]
block_out = base_channels * channel_multiplier[i_level]
for i_block in range(self.num_res_blocks):
block.append(Emu3VQVAEResnetBlock(in_channels=block_in, out_channels=block_out))
block_in = block_out
if config.attn_resolutions is not None and i_level in config.attn_resolutions:
attn.append(Emu3VQVAEAttentionBlock(config))
attn_norms.append(nn.GroupNorm(num_channels=block_in, num_groups=32, eps=1e-06, affine=True))
down = nn.Module()
down.block = block
down.attn = attn
down.attn_norms = attn_norms
if i_level != self.num_resolutions - 1:
down.downsample = Emu3VQVAEEncoderConvDownsample(block_in)
self.down.append(down)
def forward(self, hidden_states: torch.FloatTensor):
for i_level, blocks in enumerate(self.down):
for i_block in range(self.num_res_blocks):
hidden_states = blocks.block[i_block](hidden_states)
if len(blocks.attn) > 0:
residual = hidden_states
hidden_states = blocks.attn_norms[i_block](hidden_states)
batch_size, channels, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
hidden_states = blocks.attn[i_block](hidden_states)[0]
hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
hidden_states = residual + hidden_states
if i_level != self.num_resolutions - 1:
hidden_states = blocks.downsample(hidden_states)
return hidden_states
|
class Emu3VQVAEDownBlock(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.FloatTensor):
pass
| 3
| 0
| 28
| 4
| 24
| 0
| 5
| 0
| 1
| 7
| 3
| 0
| 2
| 4
| 2
| 12
| 57
| 8
| 49
| 22
| 46
| 0
| 44
| 22
| 41
| 5
| 1
| 3
| 10
|
2,362
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAEEncoder
|
import torch.nn as nn
import torch
import math
import torch.nn.functional as F
class Emu3VQVAEEncoder(nn.Module):
def __init__(self, config):
super().__init__()
base_channels = config.base_channels
in_channels = config.in_channels
double_latent = config.double_latent
latent_channels = config.latent_channels
channel_multiplier = config.channel_multiplier
out_channels = 2 * latent_channels if double_latent else latent_channels
block_in = base_channels * channel_multiplier[-1]
self.conv_in = torch.nn.Conv2d(in_channels, base_channels, kernel_size=3, stride=1, padding=1)
self.down_block = Emu3VQVAEDownBlock(config)
self.middle_block = Emu3VQVAEMiddleBlock(config, block_in)
self.norm_out = torch.nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-06, affine=True)
self.conv_out = torch.nn.Conv2d(block_in, out_channels, kernel_size=3, stride=1, padding=1)
temporal_down_blocks = int(math.log2(config.temporal_downsample_factor))
self.time_conv = nn.ModuleList()
self.time_res_stack = nn.ModuleList()
for i in range(temporal_down_blocks):
conv = Emu3VQVAETemporalDownsample(out_channels, out_channels)
self.time_conv.append(conv)
for _ in range(config.num_res_blocks):
time_res_conv = Emu3VQVAETemporalResnetBlock(in_channels=out_channels, out_channels=out_channels)
self.time_res_stack.append(time_res_conv)
def forward(self, pixel_values: torch.LongTensor):
temporal_dim = pixel_values.shape[1]
pixel_values = pixel_values.reshape(-1, *pixel_values.shape[2:])
hidden_states = self.conv_in(pixel_values)
hidden_states = self.down_block(hidden_states)
hidden_states = self.middle_block(hidden_states)
hidden_states = self.norm_out(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv_out(hidden_states)
hidden_states = hidden_states.reshape(-1, temporal_dim, *hidden_states.shape[1:])
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
for conv in self.time_conv:
hidden_states = conv(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
for layer in self.time_res_stack:
hidden_states = layer(hidden_states)
hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
return hidden_states
|
class Emu3VQVAEEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, pixel_values: torch.LongTensor):
pass
| 3
| 0
| 33
| 7
| 25
| 2
| 4
| 0.06
| 1
| 7
| 4
| 0
| 2
| 7
| 2
| 12
| 68
| 14
| 51
| 26
| 48
| 3
| 42
| 26
| 39
| 4
| 1
| 1
| 7
|
2,363
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAEEncoderConvDownsample
|
from ..chameleon.modeling_chameleon import ChameleonPreTrainedModel, ChameleonVQVAEEncoderConvDownsample
class Emu3VQVAEEncoderConvDownsample(ChameleonVQVAEEncoderConvDownsample):
pass
|
class Emu3VQVAEEncoderConvDownsample(ChameleonVQVAEEncoderConvDownsample):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
2,364
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAEEncoderConvUpsample
|
import torch.nn.functional as F
import torch.nn as nn
class Emu3VQVAEEncoderConvUpsample(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
def forward(self, hidden_states):
hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode='nearest')
hidden_states = self.conv(hidden_states)
return hidden_states
|
class Emu3VQVAEEncoderConvUpsample(nn.Module):
def __init__(self, in_channels):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 9
| 1
| 8
| 4
| 5
| 0
| 8
| 4
| 5
| 1
| 1
| 0
| 2
|
2,365
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAEGroupNorm
|
import torch.nn.functional as F
import torch.nn as nn
class Emu3VQVAEGroupNorm(nn.GroupNorm):
"""
    Same as the torch GroupNorm with the only difference that this one accepts
    an optional kwarg `quant_states`, which is not used. This class makes it easier to
    use SpatialNorm or GroupNorm without conditionals.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, input, quant_states=None):
return F.group_norm(input, self.num_groups, self.weight, self.bias, self.eps)
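# The extra kwarg is simply ignored, so the wrapper is interchangeable with
# Emu3VQVAESpatialNorm in blocks built without quantized conditioning:
import torch

norm = Emu3VQVAEGroupNorm(num_groups=32, num_channels=64)
x = torch.randn(1, 64, 8, 8)
assert torch.equal(norm(x, quant_states=None), norm(x))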
|
class Emu3VQVAEGroupNorm(nn.GroupNorm):
'''
    Same as the torch GroupNorm with the only difference that this one accepts
    an optional kwarg `quant_states`, which is not used. This class makes it easier to
    use SpatialNorm or GroupNorm without conditionals.
'''
def __init__(self, **kwargs):
pass
def forward(self, input, quant_states=None):
pass
| 3
| 1
| 2
| 0
| 2
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 2
| 0
| 2
| 2
| 12
| 2
| 5
| 3
| 2
| 5
| 5
| 3
| 2
| 1
| 1
| 0
| 2
|
2,366
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAEMiddleBlock
|
import torch.nn.functional as F
from typing import Optional, Union
import torch
import torch.nn as nn
class Emu3VQVAEMiddleBlock(nn.Module):
def __init__(self, config, in_channels, quant_channels=None):
super().__init__()
self.block_1 = Emu3VQVAEResnetBlock(in_channels=in_channels, out_channels=in_channels, quant_channels=quant_channels)
self.attn_1 = Emu3VQVAEAttentionBlock(config)
if quant_channels is None:
self.attn_norm = Emu3VQVAEGroupNorm(num_channels=in_channels, num_groups=32, eps=1e-06, affine=True)
else:
self.attn_norm = Emu3VQVAESpatialNorm(quant_channels, in_channels)
self.block_2 = Emu3VQVAEResnetBlock(in_channels=in_channels, out_channels=in_channels, quant_channels=quant_channels)
def forward(self, hidden_states: torch.FloatTensor, quant_states: Optional[torch.FloatTensor]=None):
hidden_states = self.block_1(hidden_states, quant_states)
residual = hidden_states
hidden_states = self.attn_norm(hidden_states, quant_states)
batch_size, channels, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
hidden_states = self.attn_1(hidden_states)[0]
hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
hidden_states = residual + hidden_states
hidden_states = self.block_2(hidden_states, quant_states)
return hidden_states
|
class Emu3VQVAEMiddleBlock(nn.Module):
def __init__(self, config, in_channels, quant_channels=None):
pass
def forward(self, hidden_states: torch.FloatTensor, quant_states: Optional[torch.FloatTensor]=None):
pass
| 3
| 0
| 15
| 1
| 14
| 0
| 2
| 0
| 1
| 5
| 4
| 0
| 2
| 4
| 2
| 12
| 32
| 3
| 29
| 9
| 26
| 0
| 20
| 9
| 17
| 2
| 1
| 1
| 3
|
2,367
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAEResnetBlock
|
import torch
import torch.nn as nn
from typing import Optional, Union
import torch.nn.functional as F
class Emu3VQVAEResnetBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: Optional[int]=None, quant_channels: Optional[int]=None):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.quant_channels = quant_channels
if quant_channels is None:
self.norm1 = nn.GroupNorm(num_channels=in_channels, num_groups=32, eps=1e-06, affine=True)
self.norm2 = nn.GroupNorm(num_channels=out_channels, num_groups=32, eps=1e-06, affine=True)
else:
self.norm1 = Emu3VQVAESpatialNorm(quant_channels, in_channels)
self.norm2 = Emu3VQVAESpatialNorm(quant_channels, out_channels)
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
if self.in_channels != self.out_channels:
self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states: torch.Tensor, quant_channels: Optional[torch.Tensor]=None):
norm_args = () if self.quant_channels is None else (quant_channels,)
residual = hidden_states
hidden_states = self.norm1(hidden_states, *norm_args)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv1(hidden_states)
hidden_states = self.norm2(hidden_states, *norm_args)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.in_channels != self.out_channels:
residual = self.nin_shortcut(residual)
return residual + hidden_states
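# Shape sketch, assuming the class above is in scope: with differing channel
# counts the 1x1 nin_shortcut projects the residual before the sum; without
# quant_channels the plain GroupNorm path is used and quant states are ignored.
import torch

block = Emu3VQVAEResnetBlock(in_channels=64, out_channels=128)
x = torch.randn(1, 64, 16, 16)
with torch.no_grad():
    print(block(x).shape)          # torch.Size([1, 128, 16, 16])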
|
class Emu3VQVAEResnetBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: Optional[int]=None, quant_channels: Optional[int]=None):
pass
def forward(self, hidden_states: torch.Tensor, quant_channels: Optional[torch.Tensor]=None):
pass
| 3
| 0
| 30
| 4
| 26
| 0
| 4
| 0
| 1
| 4
| 1
| 0
| 2
| 8
| 2
| 12
| 61
| 9
| 52
| 18
| 44
| 0
| 28
| 13
| 25
| 4
| 1
| 1
| 7
|
2,368
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAESpatialNorm
|
import torch.nn.functional as F
import torch.nn as nn
import torch
class Emu3VQVAESpatialNorm(nn.Module):
def __init__(self, in_channels: int, out_channels: int):
super().__init__()
self.norm_layer = nn.GroupNorm(num_channels=out_channels, num_groups=32, eps=1e-06, affine=True)
self.conv_y = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
self.conv_b = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
quant_states = F.interpolate(quant_states, size=hidden_states.shape[-2:], mode='nearest')
hidden_states = self.norm_layer(hidden_states)
hidden_states = hidden_states * self.conv_y(quant_states) + self.conv_b(quant_states)
return hidden_states
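# The quantized states are upsampled to the activation resolution and turned
# into a per-pixel scale and shift via two 1x1 convolutions:
import torch

norm = Emu3VQVAESpatialNorm(in_channels=4, out_channels=64)
hidden = torch.randn(1, 64, 32, 32)   # decoder activations
quant = torch.randn(1, 4, 8, 8)       # lower-resolution quantized latents
with torch.no_grad():
    print(norm(hidden, quant).shape)  # torch.Size([1, 64, 32, 32])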
|
class Emu3VQVAESpatialNorm(nn.Module):
def __init__(self, in_channels: int, out_channels: int):
pass
def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
pass
| 3
| 0
| 16
| 1
| 16
| 0
| 1
| 0
| 1
| 3
| 0
| 0
| 2
| 3
| 2
| 12
| 34
| 2
| 32
| 10
| 25
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
2,369
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAETemporalDownsample
|
import torch.nn.functional as F
import torch
import torch.nn as nn
class Emu3VQVAETemporalDownsample(nn.Module):
def __init__(self, in_channel: int, out_channel: int):
super().__init__()
self.conv = Emu3VQVAEConv3d(in_channel, out_channel, kernel_size=(4, 3, 3), stride=(2, 1, 1))
def forward(self, hidden_states: torch.Tensor):
hidden_states = self.conv(hidden_states)
return hidden_states
|
class Emu3VQVAETemporalDownsample(nn.Module):
def __init__(self, in_channel: int, out_channel: int):
pass
def forward(self, hidden_states: torch.Tensor):
pass
| 3
| 0
| 8
| 0
| 8
| 0
| 1
| 0
| 1
| 4
| 1
| 0
| 2
| 1
| 2
| 12
| 17
| 1
| 16
| 8
| 9
| 0
| 7
| 4
| 4
| 1
| 1
| 0
| 2
|
2,370
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAETemporalResnetBlock
|
import torch.nn as nn
import torch.nn.functional as F
import torch
class Emu3VQVAETemporalResnetBlock(nn.Module):
def __init__(self, in_channels, out_channels=None):
super().__init__()
self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
self.norm1 = nn.BatchNorm3d(in_channels)
self.conv1 = Emu3VQVAEConv3d(in_channels, out_channels, kernel_size=(3, 3, 3), stride=(1, 1, 1))
self.norm2 = nn.BatchNorm3d(out_channels)
self.conv2 = Emu3VQVAEConv3d(out_channels, out_channels, kernel_size=(3, 3, 3), stride=(1, 1, 1))
if self.in_channels != self.out_channels:
self.nin_shortcut = nn.Conv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states):
residual = hidden_states
hidden_states = self.norm1(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv1(hidden_states)
hidden_states = self.norm2(hidden_states)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.in_channels != self.out_channels:
residual = self.nin_shortcut(residual)
return residual + hidden_states
|
class Emu3VQVAETemporalResnetBlock(nn.Module):
def __init__(self, in_channels, out_channels=None):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 23
| 2
| 21
| 0
| 3
| 0
| 1
| 2
| 1
| 0
| 2
| 7
| 2
| 12
| 47
| 5
| 42
| 15
| 35
| 0
| 22
| 11
| 19
| 3
| 1
| 1
| 5
|
2,371
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAETemporalUpsample
|
import torch.nn as nn
import torch
import torch.nn.functional as F
class Emu3VQVAETemporalUpsample(nn.Module):
def __init__(self, in_channel: int, out_channel: int):
super().__init__()
self.conv = Emu3VQVAEConv3d(in_channel, out_channel, kernel_size=(3, 3, 3), stride=(1, 1, 1))
def forward(self, hidden_states: torch.Tensor):
batch_size, channels, temporal, height, width = hidden_states.shape
hidden_states = hidden_states.permute(0, 1, 3, 4, 2).contiguous().view(batch_size, -1, temporal)
hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode='nearest')
hidden_states = hidden_states.view(batch_size, channels, height, width, -1).permute(0, 1, 4, 2, 3).contiguous()
hidden_states = self.conv(hidden_states)
return hidden_states
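# Shape sketch, assuming Emu3VQVAEConv3d from earlier is in scope: the time
# axis is doubled by nearest-neighbor interpolation, then smoothed by the
# causal 3D convolution.
import torch

up = Emu3VQVAETemporalUpsample(in_channel=8, out_channel=8)
x = torch.randn(1, 8, 2, 16, 16)   # (batch, channels, time, height, width)
with torch.no_grad():
    print(up(x).shape)             # torch.Size([1, 8, 4, 16, 16])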
|
class Emu3VQVAETemporalUpsample(nn.Module):
def __init__(self, in_channel: int, out_channel: int):
pass
def forward(self, hidden_states: torch.Tensor):
pass
| 3
| 0
| 10
| 0
| 10
| 0
| 1
| 0
| 1
| 4
| 1
| 0
| 2
| 1
| 2
| 12
| 21
| 1
| 20
| 9
| 13
| 0
| 11
| 5
| 8
| 1
| 1
| 0
| 2
|
2,372
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAEUpBlock
|
import torch.nn as nn
import torch
import torch.nn.functional as F
class Emu3VQVAEUpBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.num_resolutions = len(config.channel_multiplier)
self.num_res_blocks = config.num_res_blocks
quant_channels = config.embed_dim
block_in = config.base_channels * config.channel_multiplier[-1]
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
attn_norms = nn.ModuleList()
block_out = config.base_channels * config.channel_multiplier[i_level]
for i_block in range(self.num_res_blocks + 1):
block.append(Emu3VQVAEResnetBlock(in_channels=block_in, out_channels=block_out, quant_channels=quant_channels))
block_in = block_out
if i_level in config.attn_resolutions:
attn.append(Emu3VQVAEAttentionBlock(config))
attn_norms.append(Emu3VQVAESpatialNorm(quant_channels, block_in))
up = nn.Module()
up.block = block
up.attn = attn
up.attn_norms = attn_norms
if i_level != 0:
up.upsample = Emu3VQVAEEncoderConvUpsample(block_in)
self.up.insert(0, up)
def forward(self, hidden_states: torch.FloatTensor, quant_states: torch.FloatTensor):
for i_level, blocks in enumerate(self.up[::-1]):
for i_block in range(self.num_res_blocks + 1):
hidden_states = blocks.block[i_block](hidden_states, quant_states)
if len(blocks.attn) > 0:
residual = hidden_states
hidden_states = blocks.attn_norms[i_block](hidden_states, quant_states)
batch_size, channels, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
hidden_states = blocks.attn[i_block](hidden_states)[0]
hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
hidden_states = residual + hidden_states
if i_level != len(self.up) - 1:
hidden_states = blocks.upsample(hidden_states)
return hidden_states
|
class Emu3VQVAEUpBlock(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.FloatTensor, quant_states: torch.FloatTensor):
pass
| 3
| 0
| 27
| 4
| 23
| 0
| 5
| 0
| 1
| 8
| 4
| 0
| 2
| 3
| 2
| 12
| 56
| 9
| 47
| 19
| 44
| 0
| 41
| 19
| 38
| 5
| 1
| 3
| 10
|
2,373
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/modular_emu3.py
|
transformers.models.emu3.modular_emu3.Emu3VQVAEVectorQuantizer
|
from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
import torch.nn.functional as F
import torch
import torch.nn as nn
class Emu3VQVAEVectorQuantizer(nn.Module):
"""
A module for vector quantization using learned embedding vectors.
    This module implements a quantization process similar to the one described in
the VQ-VAE (Vector Quantized Variational AutoEncoder) paper. It quantizes continuous
input vectors into discrete codebook vectors, which are learned during training.
Current implementation improves over previous ones by avoiding costly matrix multiplications
and allowing for post-hoc remapping of indices.
"""
def __init__(self, config: Emu3VQVAEConfig):
super().__init__()
self.embedding = nn.Embedding(config.codebook_size, config.embed_dim)
self.embedding.weight.data.uniform_(-1.0 / config.codebook_size, 1.0 / config.codebook_size)
def forward(self, hidden_state: torch.Tensor):
batch_size, temporal, channels, height, width = hidden_state.shape
hidden_state = hidden_state.permute(0, 1, 3, 4, 2).contiguous()
hidden_state_flattened = hidden_state.view(-1, channels)
hidden_state_sum = torch.sum(hidden_state_flattened ** 2, dim=1, keepdim=True)
embedding_sum = torch.sum(self.embedding.weight ** 2, dim=1)
distances = 2 * torch.matmul(hidden_state_flattened, self.embedding.weight.transpose(0, 1))
distances = hidden_state_sum + embedding_sum - distances
min_encoding_indices = torch.argmin(distances, dim=1)
min_encoding_indices = min_encoding_indices.view(batch_size, temporal, height, width)
return min_encoding_indices
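# Numerical check of the lookup above, assuming the class is in scope:
# ||x||^2 + ||e||^2 - 2<x, e> is the squared Euclidean distance, so the
# argmin matches an explicit torch.cdist search (up to floating-point ties).
# Emu3VQVAEConfig follows the import at the top of this file.
import torch
from transformers.models.emu3.configuration_emu3 import Emu3VQVAEConfig

quantizer = Emu3VQVAEVectorQuantizer(Emu3VQVAEConfig(codebook_size=16, embed_dim=4))
x = torch.randn(2, 1, 4, 8, 8)           # (batch, time, channels, height, width)
codes = quantizer(x)                     # (2, 1, 8, 8) codebook indices

flat = x.permute(0, 1, 3, 4, 2).reshape(-1, 4)
expected = torch.cdist(flat, quantizer.embedding.weight).argmin(dim=1)
assert torch.equal(codes.flatten(), expected)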
|
class Emu3VQVAEVectorQuantizer(nn.Module):
'''
A module for vector quantization using learned embedding vectors.
    This module implements a quantization process similar to the one described in
the VQ-VAE (Vector Quantized Variational AutoEncoder) paper. It quantizes continuous
input vectors into discrete codebook vectors, which are learned during training.
Current implementation improves over previous ones by avoiding costly matrix multiplications
and allowing for post-hoc remapping of indices.
'''
def __init__(self, config: Emu3VQVAEConfig):
pass
def forward(self, hidden_state: torch.Tensor):
pass
| 3
| 1
| 10
| 2
| 8
| 1
| 1
| 0.63
| 1
| 3
| 1
| 0
| 2
| 1
| 2
| 12
| 32
| 6
| 16
| 10
| 13
| 10
| 16
| 10
| 13
| 1
| 1
| 0
| 2
|
2,374
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/processing_emu3.py
|
transformers.models.emu3.processing_emu3.Emu3ImagesKwargs
|
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
class Emu3ImagesKwargs(ImagesKwargs, total=False):
ratio: str
image_area: int
|
class Emu3ImagesKwargs(ImagesKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0
| 3
| 1
| 2
| 0
| 3
| 1
| 2
| 0
| 2
| 0
| 0
|
2,375
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/processing_emu3.py
|
transformers.models.emu3.processing_emu3.Emu3Processor
|
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
import numpy as np
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...image_utils import ImageInput
from typing import Optional, Union
from ...image_processing_utils import BatchFeature
from .image_processing_emu3 import smart_resize
class Emu3Processor(ProcessorMixin):
"""
    Constructs an Emu3 processor which wraps an Emu3 image processor and a GPT2 tokenizer into a single
processor.
[`Emu3Processor`] offers all the functionalities of [`Emu3ImageProcessor`] and [`GPT2TokenizerFast`].
See the [`~Emu3Processor.__call__`] and [`~Emu3Processor.decode`] for more information.
Args:
image_processor ([`Emu3ImageProcessor`]):
The image processor is a required input.
tokenizer ([`Emu3TokenizerFast`]):
The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
attributes = ['image_processor', 'tokenizer']
tokenizer_class = ('GPT2Tokenizer', 'GPT2TokenizerFast')
image_processor_class = 'Emu3ImageProcessor'
def __init__(self, image_processor, tokenizer, chat_template=None, **kwargs):
self.image_token = tokenizer.image_token
self.image_token_id = tokenizer.image_token_id
self.image_start_token = tokenizer.boi_token
self.image_end_token = tokenizer.eoi_token
self.fake_token_around_image = tokenizer.image_wrapper_token
self.eof_token = tokenizer.eof_token
self.bos_token = tokenizer.bos_token
self.downsample_ratio = 8
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[Emu3ProcessorKwargs]) -> BatchFeature:
"""
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
and `kwargs` arguments to Emu3TokenizerFast's [`~Emu3TokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
if isinstance(text, str):
text = [text]
        elif text is not None and (not isinstance(text, list) or not isinstance(text[0], str)):
raise TypeError('Invalid input text. Please provide a string, or a list of strings')
output_kwargs = self._merge_kwargs(Emu3ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
return_for_image_generation = output_kwargs['text_kwargs'].pop('return_for_image_generation', False)
ratio = output_kwargs['images_kwargs'].pop('ratio', None)
image_area = output_kwargs['images_kwargs'].pop('image_area', None)
if return_for_image_generation and images is not None:
raise ValueError('You should not provide `images` when `return_for_image_generation=True`')
if not return_for_image_generation and text is None and (images is None):
raise ValueError('You must provide either text or images when `return_for_image_generation=False`')
image_features = {}
image_start_tokens = f'{self.image_start_token}'
image_end_tokens = f'{self.eof_token}{self.image_end_token}'
if not return_for_image_generation and images is not None:
image_features = self.image_processor(images, **output_kwargs['images_kwargs'])
image_sizes = iter(image_features.image_sizes)
prompt_strings = []
for sample in text:
while self.image_token in sample:
image_size = next(image_sizes)
height, width = image_size
height = height // self.downsample_ratio
width = width // self.downsample_ratio
image_seq_length = height * (width + 1)
image_placeholder = f"{image_start_tokens}{height}*{width}{self.fake_token_around_image}{'<placeholder>' * image_seq_length}{image_end_tokens}"
sample = sample.replace(self.image_token, image_placeholder, 1)
sample = f'{self.bos_token}{sample}'
prompt_strings.append(sample)
text = [sample.replace('<placeholder>', self.image_token) for sample in prompt_strings]
elif return_for_image_generation:
height, width = self.calculate_generate_size(ratio, image_area, self.downsample_ratio)
image_prompt = f'{image_start_tokens}{height}*{width}{self.fake_token_around_image}'
text = [f'{self.bos_token}{sample}{image_prompt}' for sample in text]
image_features['image_sizes'] = [[height, width]] * len(text)
return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)
return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', False)
text_inputs = self.tokenizer(text, **output_kwargs['text_kwargs'], return_tensors=None)
self._check_special_mm_tokens(text, text_inputs, modalities=['image'])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs['input_ids'])
mm_token_type_ids = np.zeros_like(text_inputs['input_ids'])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs['mm_token_type_ids'] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_features}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
num_image_tokens = []
for height, width in image_sizes:
height, width = smart_resize(height, width, self.image_processor.spatial_factor, self.image_processor.min_pixels, self.image_processor.max_pixels)
height = height // self.downsample_ratio
width = width // self.downsample_ratio
image_seq_length = height * (width + 1)
num_image_tokens.append(image_seq_length)
num_image_patches = [1] * len(image_sizes)
vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
return MultiModalData(**vision_data)
def calculate_generate_size(self, ratio, image_area, spatial_factor):
width, height = map(int, ratio.split(':'))
current_area = width * height
target_ratio = (image_area / current_area) ** 0.5
token_height = int(round(height * target_ratio / spatial_factor))
token_width = int(round(width * target_ratio / spatial_factor))
return (token_height, token_width)
def postprocess(self, images: ImageInput, **kwargs):
return self.image_processor.postprocess(images, **kwargs)
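# Worked example for calculate_generate_size, assuming a processor loaded as
# in the generation docstring above. With ratio "4:3", the default image_area
# of 518400 and downsample_ratio 8:
#   target_ratio = sqrt(518400 / (4 * 3)) ~= 207.85
#   token_height = round(3 * 207.85 / 8) = 78
#   token_width  = round(4 * 207.85 / 8) = 104
from transformers import Emu3Processor

processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf")
assert processor.calculate_generate_size("4:3", 518400, processor.downsample_ratio) == (78, 104)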
|
class Emu3Processor(ProcessorMixin):
'''
    Constructs an Emu3 processor which wraps an Emu3 image processor and a GPT2 tokenizer into a single
processor.
[`Emu3Processor`] offers all the functionalities of [`Emu3ImageProcessor`] and [`GPT2TokenizerFast`].
See the [`~Emu3Processor.__call__`] and [`~Emu3Processor.decode`] for more information.
Args:
image_processor ([`Emu3ImageProcessor`]):
The image processor is a required input.
tokenizer ([`Emu3TokenizerFast`]):
The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
'''
def __init__(self, image_processor, tokenizer, chat_template=None, **kwargs):
pass
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[Emu3ProcessorKwargs]) -> BatchFeature:
'''
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
and `kwargs` arguments to Emu3TokenizerFast's [`~Emu3TokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
'''
pass
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
'''
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
'''
pass
def calculate_generate_size(self, ratio, image_area, spatial_factor):
pass
def postprocess(self, images: ImageInput, **kwargs):
pass
| 6
| 3
| 20
| 2
| 12
| 7
| 2
| 0.68
| 1
| 10
| 2
| 0
| 7
| 7
| 7
| 24
| 167
| 26
| 88
| 55
| 66
| 60
| 68
| 41
| 60
| 9
| 2
| 3
| 15
|
2,376
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/processing_emu3.py
|
transformers.models.emu3.processing_emu3.Emu3ProcessorKwargs
|
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
class Emu3ProcessorKwargs(ProcessingKwargs, total=False):
text_kwargs: Emu3TextKwargs
images_kwargs: Emu3ImagesKwargs
_defaults = {'text_kwargs': {'return_for_image_generation': False, 'return_mm_token_type_ids': False}, 'images_kwargs': {'ratio': '1:1', 'image_area': 518400}}
|
class Emu3ProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0
| 12
| 2
| 11
| 0
| 4
| 2
| 3
| 0
| 3
| 0
| 0
|
2,377
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/emu3/processing_emu3.py
|
transformers.models.emu3.processing_emu3.Emu3TextKwargs
|
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
class Emu3TextKwargs(TextKwargs, total=False):
return_for_image_generation: bool
|
class Emu3TextKwargs(TextKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 2
| 1
| 1
| 0
| 2
| 1
| 1
| 0
| 2
| 0
| 0
|
2,378
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/configuration_encodec.py
|
transformers.models.encodec.configuration_encodec.EncodecConfig
|
from typing import Optional
from ...configuration_utils import PretrainedConfig
import numpy as np
import math
class EncodecConfig(PretrainedConfig):
"""
    This is the configuration class to store the configuration of an [`EncodecModel`]. It is used to instantiate an
Encodec model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the
[facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
target_bandwidths (`list[float]`, *optional*, defaults to `[1.5, 3.0, 6.0, 12.0, 24.0]`):
The range of different bandwidths the model can encode audio with.
sampling_rate (`int`, *optional*, defaults to 24000):
            The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
audio_channels (`int`, *optional*, defaults to 1):
Number of channels in the audio data. Either 1 for mono or 2 for stereo.
normalize (`bool`, *optional*, defaults to `False`):
Whether the audio shall be normalized when passed.
chunk_length_s (`float`, *optional*):
            If defined, the audio is pre-processed into chunks of length `chunk_length_s` and then encoded.
overlap (`float`, *optional*):
Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following
            formula: `int((1.0 - self.overlap) * self.chunk_length)`.
hidden_size (`int`, *optional*, defaults to 128):
Intermediate representation dimension.
num_filters (`int`, *optional*, defaults to 32):
            Number of convolution kernels in the first `EncodecConv1d` downsampling layer.
num_residual_layers (`int`, *optional*, defaults to 1):
Number of residual layers.
        upsampling_ratios (`Sequence[int]`, *optional*, defaults to `[8, 5, 4, 2]`):
            Kernel size and stride ratios. The encoder uses downsampling ratios instead of upsampling ratios, hence it
            will use the ratios in reverse order to the ones specified here, which must match the decoder order.
norm_type (`str`, *optional*, defaults to `"weight_norm"`):
Normalization method. Should be in `["weight_norm", "time_group_norm"]`
kernel_size (`int`, *optional*, defaults to 7):
Kernel size for the initial convolution.
last_kernel_size (`int`, *optional*, defaults to 7):
Kernel size for the last convolution layer.
residual_kernel_size (`int`, *optional*, defaults to 3):
Kernel size for the residual layers.
dilation_growth_rate (`int`, *optional*, defaults to 2):
How much to increase the dilation with each layer.
use_causal_conv (`bool`, *optional*, defaults to `True`):
Whether to use fully causal convolution.
pad_mode (`str`, *optional*, defaults to `"reflect"`):
Padding mode for the convolutions.
compress (`int`, *optional*, defaults to 2):
Reduced dimensionality in residual branches (from Demucs v3).
num_lstm_layers (`int`, *optional*, defaults to 2):
Number of LSTM layers at the end of the encoder.
trim_right_ratio (`float`, *optional*, defaults to 1.0):
Ratio for trimming at the right of the transposed convolution under the `use_causal_conv = True` setup. If
equal to 1.0, it means that all the trimming is done at the right.
codebook_size (`int`, *optional*, defaults to 1024):
            Number of discrete codes that make up the VQ-VAE.
codebook_dim (`int`, *optional*):
Dimension of the codebook vectors. If not defined, uses `hidden_size`.
use_conv_shortcut (`bool`, *optional*, defaults to `True`):
Whether to use a convolutional layer as the 'skip' connection in the `EncodecResnetBlock` block. If False,
an identity function will be used, giving a generic residual connection.
Example:
```python
>>> from transformers import EncodecModel, EncodecConfig
>>> # Initializing a "facebook/encodec_24khz" style configuration
>>> configuration = EncodecConfig()
>>> # Initializing a model (with random weights) from the "facebook/encodec_24khz" style configuration
>>> model = EncodecModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'encodec'
def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type='weight_norm', kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode='reflect', compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
self.target_bandwidths = target_bandwidths
self.sampling_rate = sampling_rate
self.audio_channels = audio_channels
self.normalize = normalize
self.chunk_length_s = chunk_length_s
self.overlap = overlap
self.hidden_size = hidden_size
self.num_filters = num_filters
self.num_residual_layers = num_residual_layers
self.upsampling_ratios = upsampling_ratios
self.norm_type = norm_type
self.kernel_size = kernel_size
self.last_kernel_size = last_kernel_size
self.residual_kernel_size = residual_kernel_size
self.dilation_growth_rate = dilation_growth_rate
self.use_causal_conv = use_causal_conv
self.pad_mode = pad_mode
self.compress = compress
self.num_lstm_layers = num_lstm_layers
self.trim_right_ratio = trim_right_ratio
self.codebook_size = codebook_size
self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
self.use_conv_shortcut = use_conv_shortcut
if self.norm_type not in ['weight_norm', 'time_group_norm']:
raise ValueError(f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}')
super().__init__(**kwargs)
@property
def chunk_length(self) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def chunk_stride(self) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1, int((1.0 - self.overlap) * self.chunk_length))
@property
def hop_length(self) -> int:
return int(np.prod(self.upsampling_ratios))
@property
def codebook_nbits(self) -> int:
return math.ceil(math.log2(self.codebook_size))
@property
def frame_rate(self) -> int:
return math.ceil(self.sampling_rate / self.hop_length)
@property
def num_quantizers(self) -> int:
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * self.codebook_nbits))
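# The derived properties for the default 24 kHz configuration:
#   hop_length     = prod([8, 5, 4, 2])       = 320 samples per frame
#   frame_rate     = ceil(24000 / 320)        = 75 frames/s
#   codebook_nbits = ceil(log2(1024))         = 10 bits per codebook
#   num_quantizers = 1000 * 24.0 // (75 * 10) = 32 codebooks at 24 kbps
from transformers import EncodecConfig

config = EncodecConfig()
print(config.hop_length, config.frame_rate, config.num_quantizers)   # 320 75 32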
|
class EncodecConfig(PretrainedConfig):
'''
    This is the configuration class to store the configuration of an [`EncodecModel`]. It is used to instantiate an
Encodec model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the
[facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
target_bandwidths (`list[float]`, *optional*, defaults to `[1.5, 3.0, 6.0, 12.0, 24.0]`):
The range of different bandwidths the model can encode audio with.
sampling_rate (`int`, *optional*, defaults to 24000):
            The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
audio_channels (`int`, *optional*, defaults to 1):
Number of channels in the audio data. Either 1 for mono or 2 for stereo.
normalize (`bool`, *optional*, defaults to `False`):
Whether the audio shall be normalized when passed.
chunk_length_s (`float`, *optional*):
            If defined, the audio is pre-processed into chunks of length `chunk_length_s` and then encoded.
overlap (`float`, *optional*):
Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following
            formula: `int((1.0 - self.overlap) * self.chunk_length)`.
hidden_size (`int`, *optional*, defaults to 128):
Intermediate representation dimension.
num_filters (`int`, *optional*, defaults to 32):
            Number of convolution kernels in the first `EncodecConv1d` downsampling layer.
num_residual_layers (`int`, *optional*, defaults to 1):
Number of residual layers.
        upsampling_ratios (`Sequence[int]`, *optional*, defaults to `[8, 5, 4, 2]`):
            Kernel size and stride ratios. The encoder uses downsampling ratios instead of upsampling ratios, hence it
            will use the ratios in reverse order to the ones specified here, which must match the decoder order.
norm_type (`str`, *optional*, defaults to `"weight_norm"`):
Normalization method. Should be in `["weight_norm", "time_group_norm"]`
kernel_size (`int`, *optional*, defaults to 7):
Kernel size for the initial convolution.
last_kernel_size (`int`, *optional*, defaults to 7):
Kernel size for the last convolution layer.
residual_kernel_size (`int`, *optional*, defaults to 3):
Kernel size for the residual layers.
dilation_growth_rate (`int`, *optional*, defaults to 2):
How much to increase the dilation with each layer.
use_causal_conv (`bool`, *optional*, defaults to `True`):
Whether to use fully causal convolution.
pad_mode (`str`, *optional*, defaults to `"reflect"`):
Padding mode for the convolutions.
compress (`int`, *optional*, defaults to 2):
Reduced dimensionality in residual branches (from Demucs v3).
num_lstm_layers (`int`, *optional*, defaults to 2):
Number of LSTM layers at the end of the encoder.
trim_right_ratio (`float`, *optional*, defaults to 1.0):
Ratio for trimming at the right of the transposed convolution under the `use_causal_conv = True` setup. If
equal to 1.0, it means that all the trimming is done at the right.
codebook_size (`int`, *optional*, defaults to 1024):
Number of discrete codes that make up the VQVAE.
codebook_dim (`int`, *optional*):
Dimension of the codebook vectors. If not defined, uses `hidden_size`.
use_conv_shortcut (`bool`, *optional*, defaults to `True`):
Whether to use a convolutional layer as the 'skip' connection in the `EncodecResnetBlock` block. If False,
an identity function will be used, giving a generic residual connection.
Example:
```python
>>> from transformers import EncodecModel, EncodecConfig
>>> # Initializing a "facebook/encodec_24khz" style configuration
>>> configuration = EncodecConfig()
>>> # Initializing a model (with random weights) from the "facebook/encodec_24khz" style configuration
>>> model = EncodecModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type='weight_norm', kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode='reflect', compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
pass
@property
def chunk_length(self) -> Optional[int]:
pass
@property
def chunk_stride(self) -> Optional[int]:
pass
@property
def hop_length(self) -> int:
pass
@property
def codebook_nbits(self) -> int:
pass
@property
def frame_rate(self) -> int:
pass
@property
def num_quantizers(self) -> int:
pass
| 14 | 1 | 14 | 0 | 14 | 0 | 2 | 0.92 | 1 | 3 | 0 | 0 | 5 | 23 | 5 | 5 | 161 | 15 | 76 | 61 | 40 | 70 | 42 | 31 | 36 | 3 | 1 | 1 | 9 |
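The derived properties of this configuration (`hop_length`, `codebook_nbits`, `frame_rate`, `num_quantizers`) follow directly from the documented defaults; a minimal arithmetic sketch, assuming only the default values shown in the docstring above:

```python
# Worked arithmetic for the EncodecConfig properties, using the documented
# defaults (sampling_rate=24000, upsampling_ratios=[8, 5, 4, 2],
# codebook_size=1024, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0]).
import math
import numpy as np

upsampling_ratios = [8, 5, 4, 2]
sampling_rate = 24000
codebook_size = 1024
target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]

hop_length = int(np.prod(upsampling_ratios))          # 8*5*4*2 = 320 samples per frame
codebook_nbits = math.ceil(math.log2(codebook_size))  # log2(1024) = 10 bits per code
frame_rate = math.ceil(sampling_rate / hop_length)    # ceil(24000/320) = 75 frames/s
num_quantizers = int(1000 * target_bandwidths[-1] // (frame_rate * codebook_nbits))
print(hop_length, codebook_nbits, frame_rate, num_quantizers)  # 320 10 75 32
```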
| 2,379 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/feature_extraction_encodec.py | transformers.models.encodec.feature_extraction_encodec.EncodecFeatureExtractor |
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...utils import PaddingStrategy, TensorType, logging
from typing import Optional, Union
import numpy as np
from ...feature_extraction_utils import BatchFeature
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
"""
Constructs an EnCodec feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods.
Instantiating a feature extractor with the defaults will yield a similar configuration to that of the
[facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.
Args:
feature_size (`int`, *optional*, defaults to 1):
The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
sampling_rate (`int`, *optional*, defaults to 24000):
The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
padding_value (`float`, *optional*, defaults to 0.0):
The value that is used to fill the padding values.
chunk_length_s (`float`, *optional*):
If defined, the audio is pre-processed into chunks of length `chunk_length_s` and then encoded.
overlap (`float`, *optional*):
Defines the overlap between consecutive chunks. It is used to compute the `chunk_stride` with the
formula `int((1.0 - self.overlap) * self.chunk_length)`.
"""
model_input_names = ['input_values', 'padding_mask']
def __init__(self, feature_size: int=1, sampling_rate: int=24000, padding_value: float=0.0, chunk_length_s: Optional[float]=None, overlap: Optional[float]=None, **kwargs):
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
self.chunk_length_s = chunk_length_s
self.overlap = overlap
@property
def chunk_length(self) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def chunk_stride(self) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1, int((1.0 - self.overlap) * self.chunk_length))
def __call__(self, raw_audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], padding: Optional[Union[bool, str, PaddingStrategy]]=None, truncation: Optional[bool]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, sampling_rate: Optional[int]=None) -> BatchFeature:
"""
Main method to featurize and prepare for the model one or several sequence(s).
Args:
raw_audio (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float
values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape
`(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio
(`feature_size = 2`).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, *optional*, defaults to `False`):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided audio input was sampled with {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning(f'It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. Failing to do so can result in silent errors that might be hard to debug.')
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.')
elif padding is None:
padding = True
is_batched = bool(isinstance(raw_audio, (list, tuple)) and isinstance(raw_audio[0], (np.ndarray, tuple, list)))
if is_batched:
raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
elif not is_batched and (not isinstance(raw_audio, np.ndarray)):
raw_audio = np.asarray(raw_audio, dtype=np.float32)
elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
raw_audio = raw_audio.astype(np.float32)
if not is_batched:
raw_audio = [np.asarray(raw_audio).T]
for idx, example in enumerate(raw_audio):
if example.ndim > 2:
raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}')
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels')
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels')
padded_inputs = None
input_values = BatchFeature({'input_values': raw_audio})
if self.chunk_stride is not None and self.chunk_length is not None and (max_length is None):
if truncation:
max_length = min((array.shape[0] for array in raw_audio))
nb_step = int(np.floor(max_length / self.chunk_stride))
max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
max_length = max((array.shape[0] for array in raw_audio))
nb_step = int(np.ceil(max_length / self.chunk_stride))
max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
padding = 'max_length'
else:
padded_inputs = input_values
if padded_inputs is None:
padded_inputs = self.pad(input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding)
if padding:
padded_inputs['padding_mask'] = padded_inputs.pop('attention_mask')
input_values = []
for example in padded_inputs.pop('input_values'):
if self.feature_size == 1:
example = example[..., None]
input_values.append(example.T)
padded_inputs['input_values'] = input_values
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
|
class EncodecFeatureExtractor(SequenceFeatureExtractor):
'''
Constructs an EnCodec feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods.
Instantiating a feature extractor with the defaults will yield a similar configuration to that of the
[facebook/encodec_24khz](https://huggingface.co/facebook/encodec_24khz) architecture.
Args:
feature_size (`int`, *optional*, defaults to 1):
The feature dimension of the extracted features. Use 1 for mono, 2 for stereo.
sampling_rate (`int`, *optional*, defaults to 24000):
The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
padding_value (`float`, *optional*, defaults to 0.0):
The value that is used to fill the padding values.
chunk_length_s (`float`, *optional*):
If defined, the audio is pre-processed into chunks of length `chunk_length_s` and then encoded.
overlap (`float`, *optional*):
Defines the overlap between consecutive chunks. It is used to compute the `chunk_stride` with the
formula `int((1.0 - self.overlap) * self.chunk_length)`.
'''
def __init__(self, feature_size: int=1, sampling_rate: int=24000, padding_value: float=0.0, chunk_length_s: Optional[float]=None, overlap: Optional[float]=None, **kwargs):
pass
@property
def chunk_length(self) -> Optional[int]:
pass
@property
def chunk_stride(self) -> Optional[int]:
pass
def __call__(self, raw_audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], padding: Optional[Union[bool, str, PaddingStrategy]]=None, truncation: Optional[bool]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, sampling_rate: Optional[int]=None) -> BatchFeature:
'''
Main method to featurize and prepare for the model one or several sequence(s).
Args:
raw_audio (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float
values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape
`(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio
(`feature_size = 2`).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, *optional*, defaults to `False`):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
'''
pass
| 7 | 2 | 36 | 3 | 25 | 9 | 7 | 0.54 | 1 | 10 | 1 | 0 | 4 | 2 | 4 | 21 | 178 | 21 | 102 | 31 | 79 | 55 | 61 | 13 | 56 | 21 | 3 | 2 | 26 |
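A minimal usage sketch of the feature extractor above; `chunk_length_s=1.0` and `overlap=0.01` are illustrative values chosen here to show the chunking arithmetic, not values read from a released checkpoint:

```python
import numpy as np
from transformers import EncodecFeatureExtractor

extractor = EncodecFeatureExtractor(
    feature_size=1, sampling_rate=24000, chunk_length_s=1.0, overlap=0.01
)
print(extractor.chunk_length)  # int(1.0 * 24000) = 24000 samples
print(extractor.chunk_stride)  # max(1, int((1.0 - 0.01) * 24000)) = 23760 samples

audio = np.random.randn(30000).astype(np.float32)  # 1.25 s of mono audio
inputs = extractor(raw_audio=audio, sampling_rate=24000, return_tensors="np")
print(inputs["input_values"].shape)  # padded so every chunk has the same shape
```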
| 2,380 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecConv1d |
import torch
from torch import nn
from ...utils import logging
logger = logging.get_logger(__name__)
class EncodecConv1d(nn.Module):
"""Conv1d with asymmetric or causal padding and normalization."""
def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int=1, dilation: int=1):
super().__init__()
self.causal = config.use_causal_conv
self.pad_mode = config.pad_mode
self.norm_type = config.norm_type
if self.norm_type not in ['weight_norm', 'time_group_norm']:
raise ValueError(f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}')
if stride > 1 and dilation > 1:
logger.warning(f'EncodecConv1d has been initialized with stride > 1 and dilation > 1 (kernel_size={kernel_size} stride={stride}, dilation={dilation}).')
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, dilation=dilation)
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, 'weight_norm'):
weight_norm = nn.utils.parametrizations.weight_norm
if self.norm_type == 'weight_norm':
self.conv = weight_norm(self.conv)
elif self.norm_type == 'time_group_norm':
self.norm = nn.GroupNorm(1, out_channels)
kernel_size = self.conv.kernel_size[0]
stride = torch.tensor(self.conv.stride[0], dtype=torch.int64)
dilation = self.conv.dilation[0]
kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64)
self.register_buffer('stride', stride, persistent=False)
self.register_buffer('kernel_size', kernel_size, persistent=False)
self.register_buffer('padding_total', kernel_size - stride, persistent=False)
def _get_extra_padding_for_conv1d(self, hidden_states: torch.Tensor) -> torch.Tensor:
"""See `pad_for_conv1d`."""
length = hidden_states.shape[-1]
n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1
n_frames = torch.ceil(n_frames).to(torch.int64) - 1
ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total
return ideal_length - length
@staticmethod
def _pad1d(hidden_states: torch.Tensor, paddings: tuple[int, int], mode: str='zero', value: float=0.0):
"""Tiny wrapper around torch.nn.functional.pad, just to allow for reflect padding on small input.
If this is the case, we insert extra 0 padding to the right before the reflection happens.
"""
length = hidden_states.shape[-1]
padding_left, padding_right = paddings
if mode != 'reflect':
return nn.functional.pad(hidden_states, paddings, mode, value)
max_pad = max(padding_left, padding_right)
extra_pad = 0
if length <= max_pad:
extra_pad = max_pad - length + 1
hidden_states = nn.functional.pad(hidden_states, (0, extra_pad))
padded = nn.functional.pad(hidden_states, paddings, mode, value)
end = padded.shape[-1] - extra_pad
return padded[..., :end]
def forward(self, hidden_states):
extra_padding = self._get_extra_padding_for_conv1d(hidden_states)
if self.causal:
hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode)
else:
padding_right = self.padding_total // 2
padding_left = self.padding_total - padding_right
hidden_states = self._pad1d(hidden_states, (padding_left, padding_right + extra_padding), mode=self.pad_mode)
hidden_states = self.conv(hidden_states)
if self.norm_type == 'time_group_norm':
hidden_states = self.norm(hidden_states)
return hidden_states
|
class EncodecConv1d(nn.Module):
'''Conv1d with asymmetric or causal padding and normalization.'''
def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int=1, dilation: int=1):
pass
def _get_extra_padding_for_conv1d(self, hidden_states: torch.Tensor) -> torch.Tensor:
'''See `pad_for_conv1d`.'''
pass
@staticmethod
def _pad1d(hidden_states: torch.Tensor, paddings: tuple[int, int], mode: str='zero', value: float=0.0):
'''Tiny wrapper around `torch.nn.functional.pad`, just to allow reflect padding on small inputs.
If the input is too short for the requested reflect padding, extra zero padding is inserted on the
right before the reflection happens.
'''
pass
def forward(self, hidden_states):
pass
| 6 | 3 | 22 | 3 | 17 | 2 | 3 | 0.13 | 1 | 6 | 0 | 0 | 3 | 6 | 4 | 14 | 95 | 17 | 69 | 30 | 58 | 9 | 54 | 23 | 49 | 6 | 1 | 1 | 13 |
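The `_get_extra_padding_for_conv1d` computation above can be re-derived in plain Python; a small sketch (the helper name is mine) showing how much extra right padding makes the final convolution window complete:

```python
import math

def extra_padding(length, kernel_size, stride, dilation=1):
    # mirrors the buffers registered above: effective kernel and total padding
    effective_kernel = (kernel_size - 1) * dilation + 1
    padding_total = effective_kernel - stride
    n_frames = (length - effective_kernel + padding_total) / stride + 1
    ideal_length = (math.ceil(n_frames) - 1) * stride + effective_kernel - padding_total
    return ideal_length - length

print(extra_padding(100, kernel_size=7, stride=2))  # 0: 100 samples divide evenly
print(extra_padding(101, kernel_size=7, stride=2))  # 1: one extra sample of padding
```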
| 2,381 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecConvTranspose1d |
from torch import nn
import math
class EncodecConvTranspose1d(nn.Module):
"""ConvTranspose1d with asymmetric or causal padding and normalization."""
def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int=1):
super().__init__()
self.causal = config.use_causal_conv
self.trim_right_ratio = config.trim_right_ratio
self.norm_type = config.norm_type
if self.norm_type not in ['weight_norm', 'time_group_norm']:
raise ValueError(f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}')
self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride)
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, 'weight_norm'):
weight_norm = nn.utils.parametrizations.weight_norm
if config.norm_type == 'weight_norm':
self.conv = weight_norm(self.conv)
elif config.norm_type == 'time_group_norm':
self.norm = nn.GroupNorm(1, out_channels)
if not (self.causal or self.trim_right_ratio == 1.0):
raise ValueError('`trim_right_ratio` != 1.0 only makes sense for causal convolutions')
def forward(self, hidden_states):
kernel_size = self.conv.kernel_size[0]
stride = self.conv.stride[0]
padding_total = kernel_size - stride
hidden_states = self.conv(hidden_states)
if self.norm_type == 'time_group_norm':
hidden_states = self.norm(hidden_states)
if self.causal:
padding_right = math.ceil(padding_total * self.trim_right_ratio)
else:
padding_right = padding_total // 2
padding_left = padding_total - padding_right
end = hidden_states.shape[-1] - padding_right
hidden_states = hidden_states[..., padding_left:end]
return hidden_states
|
class EncodecConvTranspose1d(nn.Module):
'''ConvTranspose1d with asymmetric or causal padding and normalization.'''
def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int=1):
pass
def forward(self, hidden_states):
pass
| 3 | 1 | 26 | 5 | 17 | 4 | 5 | 0.26 | 1 | 3 | 0 | 0 | 2 | 5 | 2 | 12 | 55 | 11 | 35 | 15 | 32 | 9 | 31 | 15 | 28 | 6 | 1 | 1 | 9 |
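The forward pass above trims the `kernel_size - stride` excess samples a transposed convolution produces; a small numeric sketch with illustrative values:

```python
import math

kernel_size, stride = 16, 8           # e.g. a ratio-8 layer: kernel = 2 * ratio
padding_total = kernel_size - stride  # 8 excess samples to trim

# causal with trim_right_ratio=1.0: everything is trimmed on the right
padding_right = math.ceil(padding_total * 1.0)  # 8
padding_left = padding_total - padding_right    # 0

# non-causal: trimmed symmetrically on both sides
padding_right = padding_total // 2              # 4
padding_left = padding_total - padding_right    # 4
```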
| 2,382 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecDecoder |
from torch import nn
from .configuration_encodec import EncodecConfig
class EncodecDecoder(nn.Module):
"""SEANet decoder as used by EnCodec."""
def __init__(self, config: EncodecConfig):
super().__init__()
scaling = int(2 ** len(config.upsampling_ratios))
model = [EncodecConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)]
model += [EncodecLSTM(config, scaling * config.num_filters)]
for ratio in config.upsampling_ratios:
current_scale = scaling * config.num_filters
model += [nn.ELU()]
model += [EncodecConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio)]
for j in range(config.num_residual_layers):
model += [EncodecResnetBlock(config, current_scale // 2, (config.dilation_growth_rate ** j, 1))]
scaling //= 2
model += [nn.ELU()]
model += [EncodecConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)]
self.layers = nn.ModuleList(model)
def forward(self, hidden_states):
for layer in self.layers:
hidden_states = layer(hidden_states)
return hidden_states
|
class EncodecDecoder(nn.Module):
'''SEANet decoder as used by EnCodec.'''
def __init__(self, config: EncodecConfig):
pass
def forward(self, hidden_states):
pass
| 3 | 1 | 14 | 2 | 11 | 2 | 3 | 0.23 | 1 | 8 | 5 | 0 | 2 | 1 | 2 | 12 | 32 | 5 | 22 | 10 | 19 | 5 | 20 | 10 | 17 | 3 | 1 | 2 | 5 |
| 2,383 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecDecoderOutput |
from typing import Optional, Union
from dataclasses import dataclass
import torch
from ...utils import ModelOutput, auto_docstring, logging
@dataclass
@auto_docstring
class EncodecDecoderOutput(ModelOutput):
"""
audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
Decoded audio values, obtained using the decoder part of Encodec.
"""
audio_values: Optional[torch.FloatTensor] = None
|
@dataclass
@auto_docstring
class EncodecDecoderOutput(ModelOutput):
'''
audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
Decoded audio values, obtained using the decoder part of Encodec.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 2.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 1 | 2 | 2 | 1 | 5 | 2 | 2 | 1 | 0 | 1 | 0 | 0 |
| 2,384 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecEncoder |
from torch import nn
from .configuration_encodec import EncodecConfig
class EncodecEncoder(nn.Module):
"""SEANet encoder as used by EnCodec."""
def __init__(self, config: EncodecConfig):
super().__init__()
model = [EncodecConv1d(config, config.audio_channels, config.num_filters, config.kernel_size)]
scaling = 1
for ratio in reversed(config.upsampling_ratios):
current_scale = scaling * config.num_filters
for j in range(config.num_residual_layers):
model += [EncodecResnetBlock(config, current_scale, [config.dilation_growth_rate ** j, 1])]
model += [nn.ELU()]
model += [EncodecConv1d(config, current_scale, current_scale * 2, kernel_size=ratio * 2, stride=ratio)]
scaling *= 2
model += [EncodecLSTM(config, scaling * config.num_filters)]
model += [nn.ELU()]
model += [EncodecConv1d(config, scaling * config.num_filters, config.hidden_size, config.last_kernel_size)]
self.layers = nn.ModuleList(model)
def forward(self, hidden_states):
for layer in self.layers:
hidden_states = layer(hidden_states)
return hidden_states
|
class EncodecEncoder(nn.Module):
'''SEANet encoder as used by EnCodec.'''
def __init__(self, config: EncodecConfig):
pass
def forward(self, hidden_states):
pass
| 3 | 1 | 13 | 2 | 10 | 2 | 3 | 0.2 | 1 | 7 | 4 | 0 | 2 | 1 | 2 | 12 | 29 | 5 | 20 | 10 | 17 | 4 | 20 | 10 | 17 | 3 | 1 | 2 | 5 |
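Because the encoder consumes `upsampling_ratios` in reverse, the channel width doubles at each downsampling stage; a sketch of the progression, assuming the documented defaults (`num_filters=32`, ratios `[8, 5, 4, 2]`):

```python
num_filters, ratios = 32, [8, 5, 4, 2]
scaling = 1
for ratio in reversed(ratios):  # downsampling by 2, then 4, then 5, then 8
    in_ch, out_ch = scaling * num_filters, scaling * num_filters * 2
    print(f"ratio={ratio}: {in_ch} -> {out_ch} channels")
    scaling *= 2
# ratio=2: 32 -> 64
# ratio=4: 64 -> 128
# ratio=5: 128 -> 256
# ratio=8: 256 -> 512
```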
| 2,385 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecEncoderOutput |
from dataclasses import dataclass
from typing import Optional, Union
import torch
from ...utils import ModelOutput, auto_docstring, logging
@dataclass
@auto_docstring
class EncodecEncoderOutput(ModelOutput):
"""
audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*):
Discrete code embeddings computed using `model.encode`.
audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*):
Scaling factor for each `audio_codes` input. This is used to unscale each chunk of audio when decoding.
last_frame_pad_length (`int`, *optional*):
The length of the padding in the last frame, if any. This is used to ensure that the encoded frames can be
outputted as a tensor. This value should be passed during decoding to ensure padding is removed from the
encoded frames.
"""
audio_codes: Optional[torch.LongTensor] = None
audio_scales: Optional[torch.FloatTensor] = None
last_frame_pad_length: Optional[int] = None
|
@dataclass
@auto_docstring
class EncodecEncoderOutput(ModelOutput):
'''
audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*):
Discrete code embeddings computed using `model.encode`.
audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*):
Scaling factor for each `audio_codes` input. This is used to unscale each chunk of audio when decoding.
last_frame_pad_length (`int`, *optional*):
The length of the padding in the last frame, if any. This is used to ensure that the encoded frames can be
outputted as a tensor. This value should be passed during decoding to ensure padding is removed from the
encoded frames.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 2.33 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 1 | 3 | 3 | 2 | 7 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
| 2,386 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecEuclideanCodebook |
import torch
from .configuration_encodec import EncodecConfig
from torch import nn
class EncodecEuclideanCodebook(nn.Module):
"""Codebook with Euclidean distance."""
def __init__(self, config: EncodecConfig):
super().__init__()
embed = torch.zeros(config.codebook_size, config.codebook_dim)
self.codebook_size = config.codebook_size
self.register_buffer('inited', torch.Tensor([True]))
self.register_buffer('cluster_size', torch.zeros(config.codebook_size))
self.register_buffer('embed', embed)
self.register_buffer('embed_avg', embed.clone())
def quantize(self, hidden_states):
embed = self.embed.t()
scaled_states = hidden_states.pow(2).sum(1, keepdim=True)
dist = -(scaled_states - 2 * hidden_states @ embed + embed.pow(2).sum(0, keepdim=True))
embed_ind = dist.max(dim=-1).indices
return embed_ind
def encode(self, hidden_states):
shape = hidden_states.shape
hidden_states = hidden_states.reshape((-1, shape[-1]))
embed_ind = self.quantize(hidden_states)
embed_ind = embed_ind.view(*shape[:-1])
return embed_ind
def decode(self, embed_ind):
quantize = nn.functional.embedding(embed_ind, self.embed)
return quantize
|
class EncodecEuclideanCodebook(nn.Module):
'''Codebook with Euclidean distance.'''
def __init__(self, config: EncodecConfig):
pass
def quantize(self, hidden_states):
pass
def encode(self, hidden_states):
pass
def decode(self, embed_ind):
pass
| 5 | 1 | 7 | 1 | 6 | 1 | 1 | 0.17 | 1 | 3 | 1 | 0 | 4 | 1 | 4 | 14 | 34 | 6 | 24 | 14 | 19 | 4 | 24 | 14 | 19 | 1 | 1 | 0 | 4 |
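The `quantize` method above avoids materializing pairwise differences by expanding ||x - e||² = ||x||² - 2x·e + ||e||² and taking the argmax of the negated distance; a small standalone check against `torch.cdist` (shapes here are illustrative):

```python
import torch

hidden = torch.randn(10, 128)   # (num_vectors, codebook_dim)
embed = torch.randn(1024, 128)  # (codebook_size, codebook_dim)

dist = -(
    hidden.pow(2).sum(1, keepdim=True)
    - 2 * hidden @ embed.t()
    + embed.t().pow(2).sum(0, keepdim=True)
)
# -dist equals the squared euclidean distance, up to float rounding
assert torch.allclose(-dist, torch.cdist(hidden, embed).pow(2), atol=1e-3)
nearest = dist.max(dim=-1).indices  # index of the closest codebook entry
```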
| 2,387 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecLSTM |
from torch import nn
from .configuration_encodec import EncodecConfig
class EncodecLSTM(nn.Module):
"""
LSTM wrapper that manages the hidden state and data layout internally. Expects input in convolutional layout `(batch, channels, time)`.
"""
def __init__(self, config: EncodecConfig, dimension: int):
super().__init__()
self.lstm = nn.LSTM(dimension, dimension, config.num_lstm_layers)
def forward(self, hidden_states):
hidden_states = hidden_states.permute(2, 0, 1)
hidden_states = self.lstm(hidden_states)[0] + hidden_states
hidden_states = hidden_states.permute(1, 2, 0)
return hidden_states
|
class EncodecLSTM(nn.Module):
'''
LSTM wrapper that manages the hidden state and data layout internally. Expects input in convolutional layout `(batch, channels, time)`.
'''
def __init__(self, config: EncodecConfig, dimension: int):
pass
def forward(self, hidden_states):
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 1 | 0.33 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 12 | 14 | 2 | 9 | 4 | 6 | 3 | 9 | 4 | 6 | 1 | 1 | 0 | 2 |
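The two permutes above convert between the convolutional layout `(batch, channels, time)` and `nn.LSTM`'s default `(seq_len, batch, input_size)` layout; a standalone sketch with illustrative sizes:

```python
import torch
from torch import nn

lstm = nn.LSTM(input_size=128, hidden_size=128, num_layers=2)
x = torch.randn(4, 128, 50)  # (batch, channels, time)
h = x.permute(2, 0, 1)       # (time, batch, channels) for the LSTM
h = lstm(h)[0] + h           # residual connection over the LSTM output
h = h.permute(1, 2, 0)       # back to (batch, channels, time)
print(h.shape)               # torch.Size([4, 128, 50])
```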
| 2,388 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecModel |
from .configuration_encodec import EncodecConfig
import math
import torch
from torch import nn
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
@auto_docstring(custom_intro='\n The EnCodec neural audio codec model.\n ')
class EncodecModel(EncodecPreTrainedModel):
def __init__(self, config: EncodecConfig):
super().__init__(config)
self.config = config
self.encoder = EncodecEncoder(config)
self.decoder = EncodecDecoder(config)
self.quantizer = EncodecResidualVectorQuantizer(config)
self.bits_per_codebook = int(math.log2(self.config.codebook_size))
if 2 ** self.bits_per_codebook != self.config.codebook_size:
raise ValueError('The codebook_size must be a power of 2.')
self.post_init()
def get_encoder(self):
return self.encoder
def _encode_frame(self, input_values: torch.Tensor, bandwidth: float) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
Encodes the given input using the underlying VQVAE. If `config.normalize` is set to `True` the input is first
normalized. The padding mask is required to compute the correct scale.
"""
length = input_values.shape[-1]
duration = length / self.config.sampling_rate
if self.config.chunk_length_s is not None and duration > 1e-05 + self.config.chunk_length_s:
raise RuntimeError(f'Duration of frame ({duration}) is longer than chunk {self.config.chunk_length_s}')
scale = None
if self.config.normalize:
mono = torch.sum(input_values, 1, keepdim=True) / input_values.shape[1]
scale = mono.pow(2).mean(dim=-1, keepdim=True).sqrt() + 1e-08
input_values = input_values / scale
scale = scale.view(-1, 1)
embeddings = self.encoder(input_values)
codes = self.quantizer.encode(embeddings, bandwidth)
codes = codes.transpose(0, 1)
return (codes, scale)
def encode(self, input_values: torch.Tensor, padding_mask: Optional[torch.Tensor]=None, bandwidth: Optional[float]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor, Optional[torch.Tensor], int], EncodecEncoderOutput]:
"""
Encodes the input audio waveform into discrete codes of shape
`(nb_frames, batch_size, nb_quantizers, frame_len)`.
- `nb_frames=1` if `self.config.chunk_length=None` (as the encoder is applied on the full audio), which is the
case for the 24kHz model. Otherwise, `nb_frames=ceil(input_length/self.config.chunk_stride)`, which is the case
for the 48kHz model.
- `frame_len` is the length of each frame, which is equal to `ceil(input_length/self.config.hop_length)` if
`self.config.chunk_length=None` (e.g., for the 24kHz model). Otherwise, if `self.config.chunk_length` is
defined, `frame_len=self.config.chunk_length/self.config.hop_length`, e.g., the case for the 48kHz model with
`frame_len=150`.
Args:
input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
Float values of the input audio waveform.
padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
Padding mask used to pad the `input_values`.
bandwidth (`float`, *optional*):
The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible
bandwidth. Bandwidths are expressed in kbps, e.g. a 6 kbps (6000 bit/s) target is passed as
`bandwidth == 6.0`.
Returns:
EncodecEncoderOutput dict or a tuple containing:
- audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*),
- audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*),
- last_frame_pad_length (`int`, *optional*).
"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
if bandwidth is None:
bandwidth = self.config.target_bandwidths[0]
if bandwidth not in self.config.target_bandwidths:
raise ValueError(f"This model doesn't support the bandwidth {bandwidth}. Select one of {self.config.target_bandwidths}.")
_, channels, input_length = input_values.shape
if channels < 1 or channels > 2:
raise ValueError(f'Number of audio channels must be 1 or 2, but got {channels}')
chunk_length = self.config.chunk_length
if chunk_length is None:
chunk_length = input_length
stride = input_length
else:
stride = self.config.chunk_stride
if padding_mask is None:
padding_mask = torch.ones_like(input_values).bool()
else:
padding_mask = padding_mask.view(padding_mask.shape[0], -1, padding_mask.shape[-1])
encoded_frames = []
scales = []
for offset in range(0, input_length, stride):
mask = padding_mask[..., offset:offset + chunk_length].bool()
frame = mask * input_values[..., offset:offset + chunk_length]
encoded_frame, scale = self._encode_frame(frame, bandwidth)
encoded_frames.append(encoded_frame)
scales.append(scale)
last_frame_pad_length = encoded_frames[0].shape[-1] - encoded_frames[-1].shape[-1]
if last_frame_pad_length > 0:
last_frame = nn.functional.pad(encoded_frames[-1], (0, last_frame_pad_length), value=0)
encoded_frames[-1] = last_frame
encoded_frames = torch.stack(encoded_frames)
if not return_dict:
return (encoded_frames, scales, last_frame_pad_length)
return EncodecEncoderOutput(encoded_frames, scales, last_frame_pad_length)
@staticmethod
def _linear_overlap_add(frames: list[torch.Tensor], stride: int):
if len(frames) == 0:
raise ValueError('`frames` cannot be an empty list.')
device = frames[0].device
dtype = frames[0].dtype
shape = frames[0].shape[:-1]
total_size = stride * (len(frames) - 1) + frames[-1].shape[-1]
frame_length = frames[0].shape[-1]
time_vec = torch.linspace(0, 1, frame_length + 2, device=device, dtype=dtype)[1:-1]
weight = 0.5 - (time_vec - 0.5).abs()
sum_weight = torch.zeros(total_size, device=device, dtype=dtype)
out = torch.zeros(*shape, total_size, device=device, dtype=dtype)
offset: int = 0
for frame in frames:
frame_length = frame.shape[-1]
out[..., offset:offset + frame_length] += weight[:frame_length] * frame
sum_weight[offset:offset + frame_length] += weight[:frame_length]
offset += stride
if sum_weight.min() == 0:
raise ValueError(f'`sum_weight` minimum element must be bigger than zero: {sum_weight}')
return out / sum_weight
def _decode_frame(self, codes: torch.Tensor, scale: Optional[torch.Tensor]=None) -> torch.Tensor:
codes = codes.transpose(0, 1)
embeddings = self.quantizer.decode(codes)
outputs = self.decoder(embeddings)
if scale is not None:
outputs = outputs * scale.view(-1, 1, 1)
return outputs
def decode(self, audio_codes: torch.LongTensor, audio_scales: torch.Tensor, padding_mask: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, last_frame_pad_length: Optional[int]=0) -> Union[tuple[torch.Tensor, torch.Tensor], EncodecDecoderOutput]:
"""
Decodes the given frames into an output audio waveform.
Note that the output might be slightly longer than the input. In that case, any extra samples at the end can be
trimmed.
Args:
audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*):
Discrete code embeddings computed using `model.encode`.
audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*):
Scaling factor for each `audio_codes` input.
padding_mask (`torch.Tensor` of shape `(channels, sequence_length)`):
Padding mask used to pad the `input_values`.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
last_frame_pad_length (`int`, *optional*):
Integer representing the length of the padding in the last frame, which is removed during decoding.
"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
chunk_length = self.config.chunk_length
if chunk_length is None:
if len(audio_codes) != 1:
raise ValueError(f'Expected one frame, got {len(audio_codes)}')
frame = audio_codes[0]
if last_frame_pad_length > 0:
frame = frame[..., :-last_frame_pad_length]
audio_values = self._decode_frame(frame, audio_scales[0])
else:
decoded_frames = []
for i, (frame, scale) in enumerate(zip(audio_codes, audio_scales)):
if i == len(audio_codes) - 1 and last_frame_pad_length > 0:
frame = frame[..., :-last_frame_pad_length]
frames = self._decode_frame(frame, scale)
decoded_frames.append(frames)
audio_values = self._linear_overlap_add(decoded_frames, self.config.chunk_stride or 1)
if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]:
audio_values = audio_values[..., :padding_mask.shape[-1]]
if not return_dict:
return (audio_values,)
return EncodecDecoderOutput(audio_values)
@auto_docstring
def forward(self, input_values: torch.FloatTensor, padding_mask: Optional[torch.BoolTensor]=None, bandwidth: Optional[float]=None, audio_codes: Optional[torch.LongTensor]=None, audio_scales: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, last_frame_pad_length: Optional[int]=0) -> Union[tuple[torch.Tensor, torch.Tensor], EncodecOutput]:
"""
input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):
Raw audio input converted to Float and padded to the appropriate length in order to be encoded using chunks
of length self.chunk_length and a stride of `config.chunk_stride`.
padding_mask (`torch.BoolTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):
Mask to avoid computing scaling factors on padding token indices (can we avoid computing conv on these?).
Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
<Tip warning={true}>
`padding_mask` should always be passed, unless the input was truncated or not padded. This is because in
order to process tensors effectively, the input audio should be padded so that `input_length % stride =
step` with `step = chunk_length - stride`. This ensures that all chunks are of the same shape.
</Tip>
bandwidth (`float`, *optional*):
The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible
bandwidth. Bandwidths are expressed in kbps, e.g. a 6 kbps (6000 bit/s) target is passed as
`bandwidth == 6.0`.
audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*):
Discrete code embeddings computed using `model.encode`.
audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*):
Scaling factor for each `audio_codes` input.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
last_frame_pad_length (`int`, *optional*):
The length of the padding in the last frame, if any. This is used to ensure that the encoded frames can be
outputted as a tensor. This value should be passed during decoding to ensure padding is removed from the
encoded frames.
Examples:
```python
>>> from datasets import load_dataset
>>> from transformers import AutoProcessor, EncodecModel
>>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
>>> audio_sample = dataset["train"]["audio"][0]["array"]
>>> model_id = "facebook/encodec_24khz"
>>> model = EncodecModel.from_pretrained(model_id)
>>> processor = AutoProcessor.from_pretrained(model_id)
>>> inputs = processor(raw_audio=audio_sample, return_tensors="pt")
>>> outputs = model(**inputs)
>>> audio_codes = outputs.audio_codes
>>> audio_values = outputs.audio_values
```"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
if padding_mask is None:
padding_mask = torch.ones_like(input_values).bool()
else:
padding_mask = padding_mask.view(padding_mask.shape[0], -1, padding_mask.shape[-1])
if audio_codes is not None and audio_scales is None:
raise ValueError('You specified `audio_codes` but did not specify the `audio_scales`')
if audio_scales is not None and audio_codes is None:
raise ValueError('You specified `audio_scales` but did not specify the `audio_codes`')
if audio_scales is None and audio_codes is None:
audio_codes, audio_scales, last_frame_pad_length = self.encode(input_values, padding_mask, bandwidth, False)
audio_values = self.decode(audio_codes, audio_scales, padding_mask, return_dict=return_dict, last_frame_pad_length=last_frame_pad_length)[0]
if not return_dict:
return (audio_codes, audio_values)
return EncodecOutput(audio_codes=audio_codes, audio_values=audio_values)
|
@auto_docstring(custom_intro='\n The EnCodec neural audio codec model.\n ')
class EncodecModel(EncodecPreTrainedModel):
def __init__(self, config: EncodecConfig):
pass
def get_encoder(self):
pass
def _encode_frame(self, input_values: torch.Tensor, bandwidth: float) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
'''
Encodes the given input using the underlying VQVAE. If `config.normalize` is set to `True` the input is first
normalized. The padding mask is required to compute the correct scale.
'''
pass
def encode(self, input_values: torch.Tensor, padding_mask: Optional[torch.Tensor]=None, bandwidth: Optional[float]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor, Optional[torch.Tensor], int], EncodecEncoderOutput]:
'''
Encodes the input audio waveform into discrete codes of shape
`(nb_frames, batch_size, nb_quantizers, frame_len)`.
- `nb_frames=1` if `self.config.chunk_length=None` (as the encoder is applied on the full audio), which is the
case for the 24kHz model. Otherwise, `nb_frames=ceil(input_length/self.config.chunk_stride)`, which is the case
for the 48kHz model.
- `frame_len` is the length of each frame, which is equal to `ceil(input_length/self.config.hop_length)` if
`self.config.chunk_length=None` (e.g., for the 24kHz model). Otherwise, if `self.config.chunk_length` is
defined, `frame_len=self.config.chunk_length/self.config.hop_length`, e.g., the case for the 48kHz model with
`frame_len=150`.
Args:
input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
Float values of the input audio waveform.
padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
Padding mask used to pad the `input_values`.
bandwidth (`float`, *optional*):
The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible
bandwidth. Bandwidths are expressed in kbps, e.g. a 6 kbps (6000 bit/s) target is passed as
`bandwidth == 6.0`.
Returns:
EncodecEncoderOutput dict or a tuple containing:
- audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*),
- audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*),
- last_frame_pad_length (`int`, *optional*).
'''
pass
@staticmethod
def _linear_overlap_add(frames: list[torch.Tensor], stride: int):
pass
def _decode_frame(self, codes: torch.Tensor, scale: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
def decode(self, audio_codes: torch.LongTensor, audio_scales: torch.Tensor, padding_mask: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, last_frame_pad_length: Optional[int]=0) -> Union[tuple[torch.Tensor, torch.Tensor], EncodecDecoderOutput]:
'''
Decodes the given frames into an output audio waveform.
Note that the output might be slightly longer than the input. In that case, any extra samples at the end can be
trimmed.
Args:
audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*):
Discrete code embeddings computed using `model.encode`.
audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*):
Scaling factor for each `audio_codes` input.
padding_mask (`torch.Tensor` of shape `(channels, sequence_length)`):
Padding mask used to pad the `input_values`.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
last_frame_pad_length (`int`, *optional*):
Integer representing the length of the padding in the last frame, which is removed during decoding.
'''
pass
@auto_docstring
def forward(self, input_values: torch.FloatTensor, padding_mask: Optional[torch.BoolTensor]=None, bandwidth: Optional[float]=None, audio_codes: Optional[torch.LongTensor]=None, audio_scales: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, last_frame_pad_length: Optional[int]=0) -> Union[tuple[torch.Tensor, torch.Tensor], EncodecOutput]:
'''
input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):
Raw audio input converted to Float and padded to the appropriate length in order to be encoded using chunks
of length self.chunk_length and a stride of `config.chunk_stride`.
padding_mask (`torch.BoolTensor` of shape `(batch_size, channels, sequence_length)`, *optional*):
Mask to avoid computing scaling factors on padding token indices (can we avoid computing conv on these?).
Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
<Tip warning={true}>
`padding_mask` should always be passed, unless the input was truncated or not padded. This is because in
order to process tensors effectively, the input audio should be padded so that `input_length % stride =
step` with `step = chunk_length - stride`. This ensures that all chunks are of the same shape.
</Tip>
bandwidth (`float`, *optional*):
The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible
bandwidth. Bandwidths are expressed in kbps, e.g. a 6 kbps (6000 bit/s) target is passed as
`bandwidth == 6.0`.
audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*):
Discrete code embeddings computed using `model.encode`.
audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*):
Scaling factor for each `audio_codes` input.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
last_frame_pad_length (`int`, *optional*):
The length of the padding in the last frame, if any. This is used to ensure that the encoded frames can be
outputted as a tensor. This value should be passed during decoding to ensure padding is removed from the
encoded frames.
Examples:
```python
>>> from datasets import load_dataset
>>> from transformers import AutoProcessor, EncodecModel
>>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
>>> audio_sample = dataset["train"]["audio"][0]["array"]
>>> model_id = "facebook/encodec_24khz"
>>> model = EncodecModel.from_pretrained(model_id)
>>> processor = AutoProcessor.from_pretrained(model_id)
>>> inputs = processor(raw_audio=audio_sample, return_tensors="pt")
>>> outputs = model(**inputs)
>>> audio_codes = outputs.audio_codes
>>> audio_values = outputs.audio_values
```'''
pass
| 12 | 4 | 29 | 5 | 16 | 8 | 4 | 0.47 | 1 | 16 | 7 | 0 | 8 | 5 | 9 | 10 | 277 | 54 | 152 | 74 | 117 | 71 | 120 | 50 | 110 | 10 | 2 | 2 | 37 |
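`_linear_overlap_add` above blends overlapping decoded frames with a triangular window and renormalizes by the summed weights; a small numeric check showing that a constant signal split into overlapping chunks is reconstructed exactly:

```python
import torch

frames = [torch.ones(1, 1, 6), torch.ones(1, 1, 6)]  # two 6-sample frames
stride = 4                                           # 2 samples of overlap

frame_length = frames[0].shape[-1]
total_size = stride * (len(frames) - 1) + frames[-1].shape[-1]
t = torch.linspace(0, 1, frame_length + 2)[1:-1]     # endpoints excluded
weight = 0.5 - (t - 0.5).abs()                       # triangular window, all > 0

out = torch.zeros(1, 1, total_size)
sum_weight = torch.zeros(total_size)
offset = 0
for frame in frames:
    out[..., offset:offset + frame_length] += weight * frame
    sum_weight[offset:offset + frame_length] += weight
    offset += stride
print(out / sum_weight)  # tensor of ones, shape (1, 1, 10)
```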
| 2,389 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecOutput |
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
import torch
from dataclasses import dataclass
@dataclass
@auto_docstring
class EncodecOutput(ModelOutput):
"""
audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*):
Discrete code embeddings computed using `model.encode`.
audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
Decoded audio values, obtained using the decoder part of Encodec.
"""
audio_codes: Optional[torch.LongTensor] = None
audio_values: Optional[torch.FloatTensor] = None
|
@dataclass
@auto_docstring
class EncodecOutput(ModelOutput):
'''
audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*):
Discrete code embeddings computed using `model.encode`.
audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
Decoded audio values, obtained using the decoder part of Encodec.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 2.33 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 1 | 3 | 3 | 2 | 7 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
| 2,390 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecPreTrainedModel |
from ...utils import ModelOutput, auto_docstring, logging
from .configuration_encodec import EncodecConfig
from torch import nn
import math
from ...modeling_utils import PreTrainedAudioTokenizerBase
@auto_docstring
class EncodecPreTrainedModel(PreTrainedAudioTokenizerBase):
config: EncodecConfig
base_model_prefix = 'encodec'
main_input_name = 'input_values'
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.GroupNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv1d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
nn.init.uniform_(module.bias, a=-k, b=k)
elif isinstance(module, nn.ConvTranspose1d):
module.reset_parameters()
elif isinstance(module, nn.LSTM):
for name, param in module.named_parameters():
if 'weight' in name:
nn.init.xavier_uniform_(param)
elif 'bias' in name:
nn.init.constant_(param, 0.0)
|
@auto_docstring
class EncodecPreTrainedModel(PreTrainedAudioTokenizerBase):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3 | 1 | 24 | 0 | 23 | 1 | 12 | 0.19 | 1 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 34 | 2 | 27 | 7 | 25 | 5 | 22 | 7 | 20 | 12 | 1 | 3 | 12 |
| 2,391 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecResidualVectorQuantizer |
from .configuration_encodec import EncodecConfig
import torch
import math
from torch import nn
from typing import Optional, Union
class EncodecResidualVectorQuantizer(nn.Module):
"""Residual Vector Quantizer."""
def __init__(self, config: EncodecConfig):
super().__init__()
self.codebook_size = config.codebook_size
self.frame_rate = config.frame_rate
self.num_quantizers = config.num_quantizers
self.layers = nn.ModuleList([EncodecVectorQuantization(config) for _ in range(config.num_quantizers)])
def get_num_quantizers_for_bandwidth(self, bandwidth: Optional[float]=None) -> int:
"""Return num_quantizers based on specified target bandwidth."""
bw_per_q = math.log2(self.codebook_size) * self.frame_rate
num_quantizers = self.num_quantizers
if bandwidth is not None and bandwidth > 0.0:
num_quantizers = int(max(1, math.floor(bandwidth * 1000 / bw_per_q)))
return num_quantizers
def encode(self, embeddings: torch.Tensor, bandwidth: Optional[float]=None) -> torch.Tensor:
"""
Encode a given input tensor with the specified frame rate at the given bandwidth. The RVQ encode method sets
the appropriate number of quantizers to use and returns indices for each quantizer.
"""
num_quantizers = self.get_num_quantizers_for_bandwidth(bandwidth)
residual = embeddings
all_indices = []
for layer in self.layers[:num_quantizers]:
indices = layer.encode(residual)
quantized = layer.decode(indices)
residual = residual - quantized
all_indices.append(indices)
out_indices = torch.stack(all_indices)
return out_indices
def decode(self, codes: torch.Tensor) -> torch.Tensor:
"""Decode the given codes to the quantized representation."""
quantized_out = torch.tensor(0.0, device=codes.device)
for i, indices in enumerate(codes):
layer = self.layers[i]
quantized = layer.decode(indices)
quantized_out = quantized_out + quantized
return quantized_out
|
class EncodecResidualVectorQuantizer(nn.Module):
'''Residual Vector Quantizer.'''
def __init__(self, config: EncodecConfig):
pass
def get_num_quantizers_for_bandwidth(self, bandwidth: Optional[float]=None) -> int:
'''Return num_quantizers based on specified target bandwidth.'''
pass
def encode(self, embeddings: torch.Tensor, bandwidth: Optional[float]=None) -> torch.Tensor:
'''
Encode a given input tensor with the specified frame rate at the given bandwidth. The RVQ encode method sets
the appropriate number of quantizers to use and returns indices for each quantizer.
'''
pass
def decode(self, codes: torch.Tensor) -> torch.Tensor:
'''Decode the given codes to the quantized representation.'''
pass
| 5 | 4 | 9 | 0 | 8 | 2 | 2 | 0.23 | 1 | 8 | 2 | 0 | 4 | 4 | 4 | 14 | 42 | 4 | 31 | 22 | 26 | 7 | 31 | 22 | 26 | 2 | 1 | 1 | 7 |
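`get_num_quantizers_for_bandwidth` above divides the bit budget by the per-quantizer cost. Worked through with the 24 kHz defaults (`frame_rate=75`, `codebook_size=1024`), each codebook layer costs log2(1024) × 75 = 750 bit/s:

```python
import math

codebook_size, frame_rate = 1024, 75
bw_per_q = math.log2(codebook_size) * frame_rate  # 750.0 bit/s per quantizer
for bandwidth in [1.5, 3.0, 6.0, 12.0, 24.0]:     # targets in kbps
    nq = int(max(1, math.floor(bandwidth * 1000 / bw_per_q)))
    print(f"{bandwidth} kbps -> {nq} quantizers")
# 1.5 -> 2, 3.0 -> 4, 6.0 -> 8, 12.0 -> 16, 24.0 -> 32
```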
| 2,392 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecResnetBlock |
from torch import nn
from .configuration_encodec import EncodecConfig
class EncodecResnetBlock(nn.Module):
"""
Residual block from SEANet model as used by EnCodec.
"""
def __init__(self, config: EncodecConfig, dim: int, dilations: list[int]):
super().__init__()
kernel_sizes = (config.residual_kernel_size, 1)
if len(kernel_sizes) != len(dilations):
raise ValueError('Number of kernel sizes should match number of dilations')
hidden = dim // config.compress
block = []
for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
in_chs = dim if i == 0 else hidden
out_chs = dim if i == len(kernel_sizes) - 1 else hidden
block += [nn.ELU()]
block += [EncodecConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)]
self.block = nn.ModuleList(block)
if config.use_conv_shortcut:
self.shortcut = EncodecConv1d(config, dim, dim, kernel_size=1)
else:
self.shortcut = nn.Identity()
def forward(self, hidden_states):
residual = hidden_states
for layer in self.block:
hidden_states = layer(hidden_states)
return self.shortcut(residual) + hidden_states
|
class EncodecResnetBlock(nn.Module):
'''
Residual block from SEANet model as used by EnCodec.
'''
def __init__(self, config: EncodecConfig, dim: int, dilations: list[int]):
pass
def forward(self, hidden_states):
pass
| 3 | 1 | 13 | 2 | 11 | 0 | 4 | 0.13 | 1 | 7 | 2 | 0 | 2 | 2 | 2 | 12 | 31 | 5 | 23 | 13 | 20 | 3 | 22 | 13 | 19 | 6 | 1 | 1 | 8 |
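The loop above builds a two-conv bottleneck: with `compress=2` and kernel sizes `(residual_kernel_size, 1)`, channels go dim → dim//2 → dim. A sketch of the flow for an illustrative `dim=256`:

```python
dim, compress = 256, 2
kernel_sizes, dilations = (3, 1), (1, 1)  # residual_kernel_size=3, dilation 1
hidden = dim // compress
for i, (k, d) in enumerate(zip(kernel_sizes, dilations)):
    in_chs = dim if i == 0 else hidden
    out_chs = dim if i == len(kernel_sizes) - 1 else hidden
    print(f"conv{i}: kernel={k}, dilation={d}, {in_chs} -> {out_chs} channels")
# conv0: kernel=3, dilation=1, 256 -> 128 channels
# conv1: kernel=1, dilation=1, 128 -> 256 channels
```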
| 2,393 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encodec/modeling_encodec.py | transformers.models.encodec.modeling_encodec.EncodecVectorQuantization |
from .configuration_encodec import EncodecConfig
from torch import nn
class EncodecVectorQuantization(nn.Module):
"""
Vector quantization implementation. Currently supports only euclidean distance.
"""
def __init__(self, config: EncodecConfig):
super().__init__()
self.codebook = EncodecEuclideanCodebook(config)
def encode(self, hidden_states):
hidden_states = hidden_states.permute(0, 2, 1)
embed_in = self.codebook.encode(hidden_states)
return embed_in
def decode(self, embed_ind):
quantize = self.codebook.decode(embed_ind)
quantize = quantize.permute(0, 2, 1)
return quantize
|
class EncodecVectorQuantization(nn.Module):
'''
Vector quantization implementation. Currently supports only euclidean distance.
'''
def __init__(self, config: EncodecConfig):
pass
def encode(self, hidden_states):
pass
def decode(self, embed_ind):
pass
| 4 | 1 | 4 | 0 | 4 | 0 | 1 | 0.25 | 1 | 3 | 2 | 0 | 3 | 1 | 3 | 13 | 18 | 3 | 12 | 7 | 8 | 3 | 12 | 7 | 8 | 1 | 1 | 0 | 3 |
| 2,394 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py | transformers.models.encoder_decoder.configuration_encoder_decoder.EncoderDecoderConfig |
from ..auto import AutoConfig
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
"""
[`EncoderDecoderConfig`] is the configuration class to store the configuration of a [`EncoderDecoderModel`]. It is
used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder
configs.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
kwargs (*optional*):
Dictionary of keyword arguments. Notably:
- **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
the encoder config.
- **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
the decoder config.
Examples:
```python
>>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
>>> # Initializing a BERT google-bert/bert-base-uncased style configuration
>>> config_encoder = BertConfig()
>>> config_decoder = BertConfig()
>>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
>>> # Initializing a Bert2Bert model (with random weights) from the google-bert/bert-base-uncased style configurations
>>> model = EncoderDecoderModel(config=config)
>>> # Accessing the model configuration
>>> config_encoder = model.config.encoder
>>> config_decoder = model.config.decoder
>>> # set decoder config to causal lm
>>> config_decoder.is_decoder = True
>>> config_decoder.add_cross_attention = True
>>> # Saving the model, including its configuration
>>> model.save_pretrained("my-model")
>>> # loading model and config from pretrained folder
>>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained("my-model")
>>> model = EncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
```"""
model_type = 'encoder-decoder'
sub_configs = {'encoder': AutoConfig, 'decoder': AutoConfig}
has_no_defaults_at_init = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
if 'encoder' not in kwargs or 'decoder' not in kwargs:
raise ValueError(f'A configuration of type {self.model_type} cannot be instantiated because both `encoder` and `decoder` sub-configurations were not passed, only {kwargs}')
encoder_config = kwargs.pop('encoder')
encoder_model_type = encoder_config.pop('model_type')
decoder_config = kwargs.pop('decoder')
decoder_model_type = decoder_config.pop('model_type')
self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
self.is_encoder_decoder = True
@classmethod
def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
"""
Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
decoder model configuration.
Returns:
[`EncoderDecoderConfig`]: An instance of a configuration object
"""
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
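Note how the two halves fit together: `from_encoder_decoder_configs` serializes each sub-config with `to_dict()`, and `__init__` pops `model_type` out of each dict so `AutoConfig.for_model` can rebuild the concrete config class. A small sketch of that round-trip, with illustrative values:

```python
# The sub-config arrives as a plain dict produced by to_dict(); popping
# 'model_type' both removes it from the kwargs and selects the config class.
encoder_dict = {"model_type": "bert", "hidden_size": 768}  # illustrative
encoder_model_type = encoder_dict.pop("model_type")
print(encoder_model_type, encoder_dict)  # bert {'hidden_size': 768}
# AutoConfig.for_model(encoder_model_type, **encoder_dict) then instantiates
# the matching config class (a BertConfig in this example).
```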
| class_skeleton:
class EncoderDecoderConfig(PretrainedConfig):
'''
[`EncoderDecoderConfig`] is the configuration class to store the configuration of a [`EncoderDecoderModel`]. It is
used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder
configs.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
kwargs (*optional*):
Dictionary of keyword arguments. Notably:
- **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
the encoder config.
- **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
the decoder config.
Examples:
```python
>>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
>>> # Initializing a BERT google-bert/bert-base-uncased style configuration
>>> config_encoder = BertConfig()
>>> config_decoder = BertConfig()
>>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
>>> # Initializing a Bert2Bert model (with random weights) from the google-bert/bert-base-uncased style configurations
>>> model = EncoderDecoderModel(config=config)
>>> # Accessing the model configuration
>>> config_encoder = model.config.encoder
>>> config_decoder = model.config.decoder
>>> # set decoder config to causal lm
>>> config_decoder.is_decoder = True
>>> config_decoder.add_cross_attention = True
>>> # Saving the model, including its configuration
>>> model.save_pretrained("my-model")
>>> # loading model and config from pretrained folder
>>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained("my-model")
>>> model = EncoderDecoderModel.from_pretrained("my-model", config=encoder_decoder_config)
```'''
def __init__(self, **kwargs):
pass
@classmethod
def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
'''
Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
decoder model configuration.
Returns:
[`EncoderDecoderConfig`]: An instance of a configuration object
'''
pass
| total_program_units: 4 | total_doc_str: 2 | AvgCountLine: 15 | AvgCountLineBlank: 2 | AvgCountLineCode: 11 | AvgCountLineComment: 3 | AvgCyclomatic: 2 | CommentToCodeRatio: 1.54
| CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 2
| CountLine: 83 | CountLineBlank: 17 | CountLineCode: 26 | CountLineCodeDecl: 16 | CountLineCodeExe: 20 | CountLineComment: 40 | CountStmt: 20 | CountStmtDecl: 13 | CountStmtExe: 17
| MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 3
| id: 2,395 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py | class_name: transformers.models.encoder_decoder.modeling_encoder_decoder.EncoderDecoderModel | human_written_code:
from .configuration_encoder_decoder import EncoderDecoderConfig
from ...cache_utils import Cache
from ...utils.generic import can_return_tuple
import torch
from ...modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
from ..auto.modeling_auto import AutoModel, AutoModelForCausalLM
from ...modeling_utils import PreTrainedModel
from ...generation import GenerationMixin
import inspect
import warnings
from ...utils import auto_docstring, logging
from ..auto.configuration_auto import AutoConfig
from torch.nn import CrossEntropyLoss
from ...configuration_utils import PretrainedConfig
from typing import Optional, Union
from torch import nn
@auto_docstring
class EncoderDecoderModel(PreTrainedModel, GenerationMixin):
"""
[`EncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
of the base model classes of the library as encoder and another one as decoder when created with the
:meth*~transformers.AutoModel.from_pretrained* class method for the encoder and
:meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder.
"""
config: EncoderDecoderConfig
base_model_prefix = 'encoder_decoder'
main_input_name = 'input_ids'
supports_gradient_checkpointing = True
_supports_param_buffer_assignment = False
_supports_flash_attn = True
_supports_sdpa = True
def __init__(self, config: Optional[PretrainedConfig]=None, encoder: Optional[PreTrainedModel]=None, decoder: Optional[PreTrainedModel]=None):
"""
encoder (`PreTrainedModel`, *optional*):
The encoder model to use.
decoder (`PreTrainedModel`, *optional*):
The decoder model to use.
"""
if config is None and (encoder is None or decoder is None):
raise ValueError('Either a configuration or an encoder and a decoder has to be provided.')
if config is None:
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
elif not isinstance(config, self.config_class):
raise ValueError(f'Config: {config} has to be of type {self.config_class}')
if config.decoder.cross_attention_hidden_size is not None:
if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
raise ValueError(f"If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for `config.encoder.hidden_size`.")
super().__init__(config)
if encoder is None:
from ..auto.modeling_auto import AutoModel
encoder = AutoModel.from_config(config.encoder)
if decoder is None:
from ..auto.modeling_auto import AutoModelForCausalLM
decoder = AutoModelForCausalLM.from_config(config.decoder)
self.encoder = encoder
self.decoder = decoder
if self.encoder.config.to_dict() != self.config.encoder.to_dict():
logger.warning(f'Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config: {self.config.encoder}')
if self.decoder.config.to_dict() != self.config.decoder.to_dict():
logger.warning(f'Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config: {self.config.decoder}')
self.config.encoder._attn_implementation = self.encoder.config._attn_implementation
self.config.decoder._attn_implementation = self.decoder.config._attn_implementation
self.encoder.config = self.config.encoder
self.decoder.config = self.config.decoder
if self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None:
self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)
if self.encoder.get_output_embeddings() is not None:
raise ValueError(f'The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head')
decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys())
if 'encoder_hidden_states' not in decoder_signature:
raise ValueError('The selected decoder is not prepared for the encoder hidden states to be passed. Please see the following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350')
self.tie_weights()
def tie_weights(self):
self.encoder.tie_weights()
self.decoder.tie_weights()
if self.config.tie_encoder_decoder:
decoder_base_model_prefix = self.decoder.base_model_prefix
tied_weights = self._tie_encoder_decoder_weights(self.encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix, 'encoder')
self._dynamic_tied_weights_keys = tied_weights
def _init_weights(self, module):
if module in self.encoder.modules():
self.encoder._init_weights(module)
elif module in self.decoder.modules():
self.decoder._init_weights(module)
def get_encoder(self):
return self.encoder
def get_input_embeddings(self):
return self.encoder.get_input_embeddings()
def get_output_embeddings(self):
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
return self.decoder.set_output_embeddings(new_embeddings)
@classmethod
def from_encoder_decoder_pretrained(cls, encoder_pretrained_model_name_or_path: Optional[str]=None, decoder_pretrained_model_name_or_path: Optional[str]=None, *model_args, **kwargs) -> PreTrainedModel:
"""
Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
the model, you need to first set it back in training mode with `model.train()`.
Params:
encoder_pretrained_model_name_or_path (`str`, *optional*):
Information necessary to initiate the encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the decoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
model_args (remaining positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
- To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import EncoderDecoderModel
>>> # initialize a bert2bert from two pretrained BERT models. Note that the cross-attention layers will be randomly initialized
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "google-bert/bert-base-uncased")
>>> # saving model after fine-tuning
>>> model.save_pretrained("./bert2bert")
>>> # load fine-tuned model
>>> model = EncoderDecoderModel.from_pretrained("./bert2bert")
```"""
kwargs_encoder = {argument[len('encoder_'):]: value for argument, value in kwargs.items() if argument.startswith('encoder_')}
kwargs_decoder = {argument[len('decoder_'):]: value for argument, value in kwargs.items() if argument.startswith('decoder_')}
for key in kwargs_encoder:
del kwargs['encoder_' + key]
for key in kwargs_decoder:
del kwargs['decoder_' + key]
encoder = kwargs_encoder.pop('model', None)
if encoder is None:
if encoder_pretrained_model_name_or_path is None:
raise ValueError('If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has to be defined.')
if 'config' not in kwargs_encoder:
encoder_config, kwargs_encoder = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(f'Initializing {encoder_pretrained_model_name_or_path} as a encoder model from a decoder model. Cross-attention and causal mask are disabled.')
encoder_config.is_decoder = False
encoder_config.add_cross_attention = False
kwargs_encoder['config'] = encoder_config
encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
decoder = kwargs_decoder.pop('model', None)
if decoder is None:
if decoder_pretrained_model_name_or_path is None:
raise ValueError('If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined.')
if 'config' not in kwargs_decoder:
decoder_config, kwargs_decoder = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True)
if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
logger.info(f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers.")
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
kwargs_decoder['config'] = decoder_config
if kwargs_decoder['config'].is_decoder is False or kwargs_decoder['config'].add_cross_attention is False:
logger.warning(f'Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a `decoder_config` to `.from_encoder_decoder_pretrained(...)`')
decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
return cls(encoder=encoder, decoder=decoder, config=config)
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, encoder_outputs: Optional[tuple[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, Seq2SeqLMOutput]:
"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the
right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import EncoderDecoderModel, BertTokenizer
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
... "google-bert/bert-base-uncased", "google-bert/bert-base-uncased"
... ) # initialize Bert2Bert from pre-trained checkpoints
>>> # training
>>> model.config.decoder_start_token_id = tokenizer.cls_token_id
>>> model.config.pad_token_id = tokenizer.pad_token_id
>>> model.config.vocab_size = model.config.decoder.vocab_size
>>> input_ids = tokenizer("This is a really long text", return_tensors="pt").input_ids
>>> labels = tokenizer("This is the corresponding summary", return_tensors="pt").input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss, logits = outputs.loss, outputs.logits
>>> # save and load from pretrained
>>> model.save_pretrained("bert2bert")
>>> model = EncoderDecoderModel.from_pretrained("bert2bert")
>>> # generation
>>> generated = model.generate(input_ids)
```"""
kwargs_shared = {key: kwargs[key] for key in ['output_attentions', 'output_hidden_states'] if key in kwargs}
kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith('decoder_')}
kwargs_encoder = kwargs_encoder | kwargs_shared
kwargs_decoder = {argument[len('decoder_'):]: value for argument, value in kwargs.items() if argument.startswith('decoder_')}
if 'num_items_in_batch' in kwargs_encoder:
kwargs_decoder['num_items_in_batch'] = kwargs_encoder.pop('num_items_in_batch', None)
kwargs_decoder = kwargs_decoder | kwargs_shared
if encoder_outputs is None:
encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs_encoder)
elif isinstance(encoder_outputs, tuple):
encoder_outputs = BaseModelOutput(*encoder_outputs)
encoder_hidden_states = encoder_outputs[0]
if self.encoder.config.hidden_size != self.decoder.config.hidden_size and self.decoder.config.cross_attention_hidden_size is None:
encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
if labels is not None and (decoder_input_ids is None and decoder_inputs_embeds is None):
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = decoder_input_ids.new_tensor(decoder_input_ids != self.config.pad_token_id)
decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, past_key_values=past_key_values, cache_position=cache_position, return_dict=True, **kwargs_decoder)
loss = None
if labels is not None:
warnings.warn(DEPRECATION_WARNING, FutureWarning)
logits = decoder_outputs.logits
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1))
return Seq2SeqLMOutput(loss=loss, logits=decoder_outputs.logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
def resize_token_embeddings(self, *args, **kwargs):
raise NotImplementedError('Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or model.decoder.resize_token_embeddings(...))')
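Both `from_encoder_decoder_pretrained` and `forward` route keyword arguments to the right sub-model by prefix: `encoder_*` keys are stripped of their prefix and sent to the encoder, `decoder_*` keys to the decoder. A standalone sketch of that dict-comprehension routing, with made-up argument names:

```python
kwargs = {"encoder_output_attentions": True, "decoder_num_beams": 4}  # illustrative
kwargs_encoder = {
    argument[len("encoder_"):]: value
    for argument, value in kwargs.items()
    if argument.startswith("encoder_")
}
kwargs_decoder = {
    argument[len("decoder_"):]: value
    for argument, value in kwargs.items()
    if argument.startswith("decoder_")
}
print(kwargs_encoder)  # {'output_attentions': True}
print(kwargs_decoder)  # {'num_beams': 4}
```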
| class_skeleton:
@auto_docstring
class EncoderDecoderModel(PreTrainedModel, GenerationMixin):
'''
[`EncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with one
of the base model classes of the library as encoder and another one as decoder when created with the
:meth*~transformers.AutoModel.from_pretrained* class method for the encoder and
:meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder.
'''
def __init__(self, config: Optional[PretrainedConfig]=None, encoder: Optional[PreTrainedModel]=None, decoder: Optional[PreTrainedModel]=None):
'''
encoder (`PreTrainedModel`, *optional*):
The encoder model to use.
decoder (`PreTrainedModel`, *optional*):
The decoder model to use.
'''
pass
def tie_weights(self):
pass
def _init_weights(self, module):
pass
def get_encoder(self):
pass
def get_input_embeddings(self):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@classmethod
def from_encoder_decoder_pretrained(cls, encoder_pretrained_model_name_or_path: Optional[str]=None, decoder_pretrained_model_name_or_path: Optional[str]=None, *model_args, **kwargs) -> PreTrainedModel:
'''
Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
the model, you need to first set it back in training mode with `model.train()`.
Params:
encoder_pretrained_model_name_or_path (`str`, *optional*):
Information necessary to initiate the encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the decoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
model_args (remaining positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
- To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import EncoderDecoderModel
>>> # initialize a bert2bert from two pretrained BERT models. Note that the cross-attention layers will be randomly initialized
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "google-bert/bert-base-uncased")
>>> # saving model after fine-tuning
>>> model.save_pretrained("./bert2bert")
>>> # load fine-tuned model
>>> model = EncoderDecoderModel.from_pretrained("./bert2bert")
```'''
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, encoder_outputs: Optional[tuple[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, Seq2SeqLMOutput]:
'''
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the
right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import EncoderDecoderModel, BertTokenizer
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
... "google-bert/bert-base-uncased", "google-bert/bert-base-uncased"
... ) # initialize Bert2Bert from pre-trained checkpoints
>>> # training
>>> model.config.decoder_start_token_id = tokenizer.cls_token_id
>>> model.config.pad_token_id = tokenizer.pad_token_id
>>> model.config.vocab_size = model.config.decoder.vocab_size
>>> input_ids = tokenizer("This is a really long text", return_tensors="pt").input_ids
>>> labels = tokenizer("This is the corresponding summary", return_tensors="pt").input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss, logits = outputs.loss, outputs.logits
>>> # save and load from pretrained
>>> model.save_pretrained("bert2bert")
>>> model = EncoderDecoderModel.from_pretrained("bert2bert")
>>> # generation
>>> generated = model.generate(input_ids)
```'''
pass
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
pass
def resize_token_embeddings(self, *args, **kwargs):
pass
| total_program_units: 16 | total_doc_str: 4 | AvgCountLine: 37 | AvgCountLineBlank: 6 | AvgCountLineCode: 23 | AvgCountLineComment: 8 | AvgCyclomatic: 4 | CommentToCodeRatio: 0.37
| CountClassBase: 2 | CountClassCoupled: 17 | CountClassCoupledModified: 6 | CountClassDerived: 0 | CountDeclInstanceMethod: 11 | CountDeclInstanceVariable: 4 | CountDeclMethod: 13 | CountDeclMethodAll: 13
| CountLine: 515 | CountLineBlank: 90 | CountLineCode: 310 | CountLineCodeDecl: 94 | CountLineCodeExe: 262 | CountLineComment: 115 | CountStmt: 177 | CountStmtDecl: 63 | CountStmtExe: 160
| MaxCyclomatic: 13 | MaxInheritanceTree: 1 | MaxNesting: 3 | SumCyclomatic: 58
| id: 2,396 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/configuration_ernie.py | class_name: transformers.models.ernie.configuration_ernie.ErnieConfig | human_written_code:
from ...configuration_utils import PretrainedConfig
class ErnieConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`ErnieModel`] or a [`TFErnieModel`]. It is used to
instantiate a ERNIE model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the ERNIE
[nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the ERNIE model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
task_type_vocab_size (`int`, *optional*, defaults to 3):
The vocabulary size of the `task_type_ids` for ERNIE2.0/ERNIE3.0 model
use_task_id (`bool`, *optional*, defaults to `False`):
Whether or not the model support `task_type_ids`
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Examples:
```python
>>> from transformers import ErnieConfig, ErnieModel
>>> # Initializing a ERNIE nghuyong/ernie-3.0-base-zh style configuration
>>> configuration = ErnieConfig()
>>> # Initializing a model (with random weights) from the nghuyong/ernie-3.0-base-zh style configuration
>>> model = ErnieModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'ernie'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, task_type_vocab_size=3, use_task_id=False, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.task_type_vocab_size = task_type_vocab_size
self.use_task_id = use_task_id
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
| class_skeleton:
class ErnieConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`ErnieModel`] or a [`TFErnieModel`]. It is used to
instantiate a ERNIE model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the ERNIE
[nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the ERNIE model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
task_type_vocab_size (`int`, *optional*, defaults to 3):
The vocabulary size of the `task_type_ids` for ERNIE2.0/ERNIE3.0 model
use_task_id (`bool`, *optional*, defaults to `False`):
Whether or not the model support `task_type_ids`
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Examples:
```python
>>> from transformers import ErnieConfig, ErnieModel
>>> # Initializing a ERNIE nghuyong/ernie-3.0-base-zh style configuration
>>> configuration = ErnieConfig()
>>> # Initializing a model (with random weights) from the nghuyong/ernie-3.0-base-zh style configuration
>>> model = ErnieModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, task_type_vocab_size=3, use_task_id=False, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
pass
| total_program_units: 2 | total_doc_str: 1 | AvgCountLine: 41 | AvgCountLineBlank: 1 | AvgCountLineCode: 40 | AvgCountLineComment: 0 | AvgCyclomatic: 1 | CommentToCodeRatio: 1.48
| CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 17 | CountDeclMethod: 1 | CountDeclMethodAll: 1
| CountLine: 115 | CountLineBlank: 11 | CountLineCode: 42 | CountLineCodeDecl: 41 | CountLineCodeExe: 19 | CountLineComment: 62 | CountStmt: 21 | CountStmtDecl: 20 | CountStmtExe: 19
| MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 1
| id: 2,397 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/configuration_ernie.py | class_name: transformers.models.ernie.configuration_ernie.ErnieOnnxConfig | human_written_code:
from collections import OrderedDict
from collections.abc import Mapping
from ...onnx import OnnxConfig
class ErnieOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'multiple-choice':
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ('task_type_ids', dynamic_axis)])
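The `inputs` property maps each ONNX input name to its dynamic axes; the only task-dependent part is whether a `choice` axis sits between `batch` and `sequence`. A quick sketch of the two layouts (using `OrderedDict.fromkeys` as a shorthand for the tuple list above):

```python
from collections import OrderedDict

for task in ("default", "multiple-choice"):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    inputs = OrderedDict.fromkeys(
        ["input_ids", "attention_mask", "token_type_ids", "task_type_ids"], dynamic_axis
    )
    print(task, inputs["input_ids"])
# default {0: 'batch', 1: 'sequence'}
# multiple-choice {0: 'batch', 1: 'choice', 2: 'sequence'}
```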
| class_skeleton:
class ErnieOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
| total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 13 | AvgCountLineBlank: 0 | AvgCountLineCode: 13 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 0
| CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 0 | CountDeclMethod: 1 | CountDeclMethodAll: 1
| CountLine: 15 | CountLineBlank: 0 | CountLineCode: 15 | CountLineCodeDecl: 4 | CountLineCodeExe: 12 | CountLineComment: 0 | CountStmt: 6 | CountStmtDecl: 3 | CountStmtExe: 4
| MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 2
| id: 2,398 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py | class_name: transformers.models.ernie.modeling_ernie.ErnieAttention | human_written_code:
from ...cache_utils import Cache, EncoderDecoderCache
import torch
import torch.nn as nn
from ...processing_utils import Unpack
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from typing import Callable, Optional, Union
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
class ErnieAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__()
self.is_cross_attention = is_cross_attention
attention_class = ErnieCrossAttention if is_cross_attention else ErnieSelfAttention
self.self = attention_class(config, position_embedding_type=position_embedding_type, is_causal=is_causal, layer_idx=layer_idx)
self.output = ErnieSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, head_mask=head_mask, past_key_value=past_key_value, cache_position=cache_position, **kwargs)
attention_output = self.output(attention_output, hidden_states)
return (attention_output, attn_weights)
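After `prune_linear_layer` has shrunk the query/key/value projections, `prune_heads` only updates the bookkeeping: subtract the pruned heads and recompute `all_head_size`. Worked through with illustrative BERT-base-like sizes:

```python
# Pruning 2 of 12 heads of size 64 shrinks all_head_size from 768 to 640.
num_attention_heads, attention_head_size = 12, 64  # illustrative
pruned_heads = set()
heads = {0, 5}
num_attention_heads -= len(heads)
all_head_size = attention_head_size * num_attention_heads
pruned_heads = pruned_heads.union(heads)
print(num_attention_heads, all_head_size, pruned_heads)  # 10 640 {0, 5}
```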
| class_skeleton:
class ErnieAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None, is_cross_attention=False):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| total_program_units: 4 | total_doc_str: 0 | AvgCountLine: 15 | AvgCountLineBlank: 1 | AvgCountLineCode: 14 | AvgCountLineComment: 1 | AvgCyclomatic: 1 | CommentToCodeRatio: 0.07
| CountClassBase: 1 | CountClassCoupled: 5 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 3 | CountDeclMethod: 3 | CountDeclMethodAll: 13
| CountLine: 49 | CountLineBlank: 4 | CountLineCode: 43 | CountLineCodeDecl: 20 | CountLineCodeExe: 30 | CountLineComment: 3 | CountStmt: 22 | CountStmtDecl: 11 | CountStmtExe: 18
| MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 4
| id: 2,399 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ernie/modeling_ernie.py | class_name: transformers.models.ernie.modeling_ernie.ErnieEmbeddings | human_written_code:
import torch.nn as nn
from typing import Callable, Optional, Union
import torch
class ErnieEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)
self.use_task_id = config.use_task_id
if config.use_task_id:
self.task_type_embeddings = nn.Embedding(config.task_type_vocab_size, config.hidden_size)
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, task_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length]
if token_type_ids is None:
if hasattr(self, 'token_type_ids'):
buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
if self.use_task_id:
if task_type_ids is None:
task_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
task_type_embeddings = self.task_type_embeddings(task_type_ids)
embeddings += task_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
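`forward` builds the output as a plain sum: word plus token-type embeddings, plus absolute position embeddings, plus task-type embeddings when `use_task_id` is set, followed by LayerNorm and dropout. A minimal sketch of that sum, with illustrative sizes:

```python
import torch
from torch import nn

vocab_size, hidden_size, max_positions = 100, 16, 32  # illustrative
word = nn.Embedding(vocab_size, hidden_size)
token_type = nn.Embedding(2, hidden_size)
position = nn.Embedding(max_positions, hidden_size)
task_type = nn.Embedding(3, hidden_size)              # use_task_id=True branch
norm, drop = nn.LayerNorm(hidden_size), nn.Dropout(0.1)

input_ids = torch.randint(0, vocab_size, (1, 10))
position_ids = torch.arange(10).unsqueeze(0)
embeddings = (
    word(input_ids)
    + token_type(torch.zeros_like(input_ids))   # all-zeros ids, as in forward()
    + position(position_ids)
    + task_type(torch.zeros_like(input_ids))
)
print(drop(norm(embeddings)).shape)  # torch.Size([1, 10, 16])
```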
| class_skeleton:
class ErnieEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, task_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
pass
| total_program_units: 3 | total_doc_str: 1 | AvgCountLine: 35 | AvgCountLineBlank: 4 | AvgCountLineCode: 28 | AvgCountLineComment: 4 | AvgCyclomatic: 6 | CommentToCodeRatio: 0.14
| CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 8 | CountDeclMethod: 2 | CountDeclMethodAll: 12
| CountLine: 74 | CountLineBlank: 10 | CountLineCode: 56 | CountLineCodeDecl: 27 | CountLineCodeExe: 45 | CountLineComment: 8 | CountStmt: 42 | CountStmtDecl: 19 | CountStmtExe: 39
| MaxCyclomatic: 9 | MaxInheritanceTree: 1 | MaxNesting: 2 | SumCyclomatic: 11