| Column | Dtype | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length, ⌀ = contains nulls) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |

For the string columns, Min and Max are string lengths.
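Each row below pairs a class's full source (`human_written_code`) with a `pass`-stubbed `class_skeleton` plus the per-class metrics listed above. A minimal sketch of loading and inspecting such rows with the `datasets` library; the dataset ID `org/class-metrics` is a placeholder, not the actual Hub ID:

```python
from datasets import load_dataset

# "org/class-metrics" is a placeholder ID; substitute the real dataset name.
ds = load_dataset("org/class-metrics", split="train")

row = ds[2500]
print(row["repository_name"])           # e.g. huggingface/pytorch-pretrained-BERT
print(row["class_name"])                # fully qualified class name
print(row["human_written_code"][:300])  # start of the class source
print(row["CountLineCode"], row["CommentToCodeRatio"])
```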
---

- **id:** 2,500
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py`
- **class_name:** `transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerConvolutionModule`

**human_written_code:**

```python
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig
from torch import nn
import torch

class FastSpeech2ConformerConvolutionModule(nn.Module):
    def __init__(self, config: FastSpeech2ConformerConfig, module_config):
        super().__init__()
        channels = config.hidden_size
        kernel_size = module_config['kernel_size']
        self.pointwise_conv1 = nn.Conv1d(channels, 2 * channels, kernel_size=1, stride=1, padding=0, bias=True)
        self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2, groups=channels, bias=True)
        self.norm = nn.BatchNorm1d(channels)
        self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=True)

    def forward(self, hidden_states):
        """
        Compute convolution module.
        Args:
            hidden_states (`torch.Tensor` of shape `(batch, time, channels)`): Input tensor.
        Returns:
            `torch.Tensor`: Output tensor of shape `(batch, time, channels)`.
        """
        hidden_states = hidden_states.transpose(1, 2)
        hidden_states = self.pointwise_conv1(hidden_states)
        hidden_states = nn.functional.glu(hidden_states, dim=1)
        hidden_states = self.depthwise_conv(hidden_states)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states * torch.sigmoid(hidden_states)
        hidden_states = self.pointwise_conv2(hidden_states)
        return hidden_states.transpose(1, 2)
```

**class_skeleton:**

```python
class FastSpeech2ConformerConvolutionModule(nn.Module):
    def __init__(self, config: FastSpeech2ConformerConfig, module_config):
        pass

    def forward(self, hidden_states):
        '''
        Compute convolution module.
        Args:
            hidden_states (`torch.Tensor` of shape `(batch, time, channels)`): Input tensor.
        Returns:
            `torch.Tensor`: Output tensor of shape `(batch, time, channels)`.
        '''
        pass
```

**Metrics:** total_program_units=3, total_doc_str=1, AvgCountLine=20, AvgCountLineBlank=4, AvgCountLineCode=10, AvgCountLineComment=6, AvgCyclomatic=1, CommentToCodeRatio=0.6, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=41, CountLineBlank=9, CountLineCode=20, CountLineCodeDecl=9, CountLineCodeExe=17, CountLineComment=12, CountStmt=18, CountStmtDecl=9, CountStmtExe=15, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
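In the rows shown here, `CommentToCodeRatio` matches `CountLineComment / CountLineCode` (for this row, 12 / 20 = 0.6; for the next, 21 / 24 ≈ 0.88). A small consistency check one could run over a split, assuming `ds` from the loading sketch above:

```python
def ratio_consistent(row, tol=0.01):
    # CommentToCodeRatio appears to be CountLineComment / CountLineCode,
    # rounded to two decimals in the preview; tolerate that rounding.
    code_lines = row["CountLineCode"]
    if code_lines == 0:
        return True  # ratio undefined for zero code lines; skip
    return abs(row["CountLineComment"] / code_lines - row["CommentToCodeRatio"]) <= tol

# e.g. all(ratio_consistent(ds[i]) for i in range(2500, 2514))
```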
---

- **id:** 2,501
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py`
- **class_name:** `transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerDurationPredictor`

**human_written_code:**

```python
import torch
from torch import nn
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig

class FastSpeech2ConformerDurationPredictor(nn.Module):
    """
    Duration predictor module.
    This is a module of duration predictor described in the paper 'FastSpeech: Fast, Robust and Controllable Text to
    Speech' https://huggingface.co/papers/1905.09263 The duration predictor predicts a duration of each frame in log domain
    from the hidden embeddings of encoder.
    Note:
        The calculation domain of outputs is different between in `forward` and in `inference`. In `forward`, the
        outputs are calculated in log domain but in `inference`, those are calculated in linear domain.
    """

    def __init__(self, config: FastSpeech2ConformerConfig):
        super().__init__()
        self.conv_layers = nn.ModuleList()
        self.log_domain_offset = 1.0
        for layer_idx in range(config.duration_predictor_layers):
            num_chans = config.duration_predictor_channels
            input_channels = config.hidden_size if layer_idx == 0 else num_chans
            layer = FastSpeech2ConformerPredictorLayer(input_channels, num_chans, config.duration_predictor_kernel_size, config.duration_predictor_dropout_rate)
            self.conv_layers.append(layer)
        self.linear = nn.Linear(config.duration_predictor_channels, 1)

    def forward(self, encoder_hidden_states):
        """
        Args:
            hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
                Batch of input sequences.
            padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):
                Batch of masks indicating padded part.
        Returns:
            `torch.Tensor`: Batch of predicted durations in log domain `(batch_size, max_text_length)`.
        """
        hidden_states = encoder_hidden_states.transpose(1, -1)
        for layer in self.conv_layers:
            hidden_states = layer(hidden_states)
        hidden_states = self.linear(hidden_states.transpose(1, -1)).squeeze(-1)
        if not self.training:
            hidden_states = torch.clamp(torch.round(hidden_states.exp() - self.log_domain_offset), min=0).long()
        return hidden_states
```

**class_skeleton:**

```python
class FastSpeech2ConformerDurationPredictor(nn.Module):
    '''
    Duration predictor module.
    This is a module of duration predictor described in the paper 'FastSpeech: Fast, Robust and Controllable Text to
    Speech' https://huggingface.co/papers/1905.09263 The duration predictor predicts a duration of each frame in log domain
    from the hidden embeddings of encoder.
    Note:
        The calculation domain of outputs is different between in `forward` and in `inference`. In `forward`, the
        outputs are calculated in log domain but in `inference`, those are calculated in linear domain.
    '''

    def __init__(self, config: FastSpeech2ConformerConfig):
        pass

    def forward(self, encoder_hidden_states):
        '''
        Args:
            hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
                Batch of input sequences.
            padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):
                Batch of masks indicating padded part.
        Returns:
            `torch.Tensor`: Batch of predicted durations in log domain `(batch_size, max_text_length)`.
        '''
        pass
```

**Metrics:** total_program_units=3, total_doc_str=2, AvgCountLine=21, AvgCountLineBlank=4, AvgCountLineCode=12, AvgCountLineComment=6, AvgCyclomatic=3, CommentToCodeRatio=0.88, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=57, CountLineBlank=12, CountLineCode=24, CountLineCodeDecl=12, CountLineCodeExe=21, CountLineComment=21, CountStmt=19, CountStmtDecl=12, CountStmtExe=16, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=6
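Because each row carries both the stub and the reference implementation, one natural use is class-level code generation: prompt a model with `class_skeleton` and score its completion against `human_written_code`. A sketch under that assumption (`generate` is a hypothetical model call, not part of the dataset):

```python
def build_prompt(row):
    # Ask a code model to fill in the stubbed methods of the skeleton.
    return (
        f"# File: {row['file_path']}\n"
        "# Complete all `pass` bodies in the following class:\n"
        f"{row['class_skeleton']}\n"
    )

# candidate = generate(build_prompt(ds[2501]))  # `generate` is hypothetical
# reference = ds[2501]["human_written_code"]    # ground truth for evaluation
```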
---

- **id:** 2,502
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py`
- **class_name:** `transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerEncoder`

**human_written_code:**

```python
from ...modeling_outputs import BaseModelOutput
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig
import torch
from torch import nn
from typing import Optional, Union

class FastSpeech2ConformerEncoder(nn.Module):
    """
    FastSpeech2ConformerEncoder encoder module.
    Args:
        config (`FastSpeech2ConformerConfig`):
            FastSpeech2ConformerConfig instance.
        module_config (`dict`):
            Dictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`.
        use_encoder_input_layer (`bool`, *optional*, defaults to `False`):
            Input layer type.
    """

    def __init__(self, config: FastSpeech2ConformerConfig, module_config, use_encoder_input_layer=False):
        super().__init__()
        self.embed = None
        if use_encoder_input_layer:
            self.embed = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.hidden_size, padding_idx=0)
        self.pos_enc = FastSpeech2ConformerRelPositionalEncoding(config, module_config)
        self.conformer_layers = nn.ModuleList([FastSpeech2ConformerEncoderLayer(config, module_config) for _ in range(module_config['layers'])])

    def forward(self, input_tensor: torch.LongTensor, attention_mask: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=False, return_dict: Optional[bool]=None):
        """
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        Returns:
            `torch.Tensor`:
                Output tensor of shape `(batch, time, attention_dim)`.
        """
        feature_representation = input_tensor
        if self.embed is not None:
            feature_representation = self.embed(feature_representation)
        hidden_states, pos_emb = self.pos_enc(feature_representation)
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for conformer_layer in self.conformer_layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = conformer_layer(hidden_states, pos_emb, attention_mask, output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
```

**class_skeleton:**

```python
class FastSpeech2ConformerEncoder(nn.Module):
    '''
    FastSpeech2ConformerEncoder encoder module.
    Args:
        config (`FastSpeech2ConformerConfig`):
            FastSpeech2ConformerConfig instance.
        module_config (`dict`):
            Dictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`.
        use_encoder_input_layer (`bool`, *optional*, defaults to `False`):
            Input layer type.
    '''

    def __init__(self, config: FastSpeech2ConformerConfig, module_config, use_encoder_input_layer=False):
        pass

    def forward(self, input_tensor: torch.LongTensor, attention_mask: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=False, return_dict: Optional[bool]=None):
        '''
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.
                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.
                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        Returns:
            `torch.Tensor`:
                Output tensor of shape `(batch, time, attention_dim)`.
        '''
        pass
```

**Metrics:** total_program_units=3, total_doc_str=2, AvgCountLine=42, AvgCountLineBlank=7, AvgCountLineCode=22, AvgCountLineComment=13, AvgCyclomatic=6, CommentToCodeRatio=0.8, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=98, CountLineBlank=17, CountLineCode=45, CountLineCodeDecl=25, CountLineCodeExe=30, CountLineComment=36, CountStmt=27, CountStmtDecl=12, CountStmtExe=24, MaxCyclomatic=9, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=11
---

- **id:** 2,503
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py`
- **class_name:** `transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerEncoderLayer`

**human_written_code:**

```python
import torch
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig
from typing import Optional, Union
from torch import nn

class FastSpeech2ConformerEncoderLayer(nn.Module):
    def __init__(self, config: FastSpeech2ConformerConfig, module_config):
        super().__init__()
        self.self_attn = FastSpeech2ConformerAttention(config, module_config)
        self.feed_forward = FastSpeech2ConformerMultiLayeredConv1d(config, module_config)
        self.macaron_style = config.use_macaron_style_in_conformer
        if self.macaron_style:
            self.feed_forward_macaron = FastSpeech2ConformerMultiLayeredConv1d(config, module_config)
            self.ff_macaron_layer_norm = nn.LayerNorm(config.hidden_size)
            self.ff_scale = 0.5
        else:
            self.ff_scale = 1.0
        self.use_cnn_module = config.use_cnn_in_conformer
        if self.use_cnn_module:
            self.conv_module = FastSpeech2ConformerConvolutionModule(config, module_config)
            self.conv_layer_norm = nn.LayerNorm(config.hidden_size)
            self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.ff_layer_norm = nn.LayerNorm(config.hidden_size)
        self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size)
        self.dropout = nn.Dropout(module_config['dropout_rate'])
        self.size = config.hidden_size
        self.normalize_before = module_config['normalize_before']
        self.concat_after = module_config['concat_after']
        if self.concat_after:
            self.concat_linear = nn.Linear(config.hidden_size + config.hidden_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor, pos_emb: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[torch.Tensor]=False):
        """
        Compute encoded features.
        Args:
            hidden_states (`torch.Tensor` of shape `(batch, time, size)`): Input tensor.
            pos_emb (`torch.Tensor` of shape `(1, time, size)`): Positional embeddings tensor.
            attention_mask (`torch.Tensor` of shape `(batch, time)`): Attention mask tensor for the input.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        Returns:
            `torch.Tensor`: Output tensor of shape `(batch, time, size)`.
        """
        if self.macaron_style:
            residual = hidden_states
            if self.normalize_before:
                hidden_states = self.ff_macaron_layer_norm(hidden_states)
            hidden_states = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(hidden_states))
            if not self.normalize_before:
                hidden_states = self.ff_macaron_layer_norm(hidden_states)
        residual = hidden_states
        if self.normalize_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)
        attention_output, attention_scores = self.self_attn(hidden_states, attention_mask=attention_mask, pos_emb=pos_emb, output_attentions=output_attentions)
        if self.concat_after:
            x_concat = torch.cat((hidden_states, attention_output), dim=-1)
            hidden_states = self.concat_linear(x_concat)
            hidden_states = residual + hidden_states
        else:
            hidden_states = self.dropout(attention_output)
            hidden_states = residual + hidden_states
        if not self.normalize_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)
        if self.use_cnn_module:
            residual = hidden_states
            if self.normalize_before:
                hidden_states = self.conv_layer_norm(hidden_states)
            hidden_states = self.conv_module(hidden_states)
            hidden_states = self.dropout(hidden_states)
            hidden_states = residual + hidden_states
            if not self.normalize_before:
                hidden_states = self.conv_layer_norm(hidden_states)
        residual = hidden_states
        if self.normalize_before:
            hidden_states = self.ff_layer_norm(hidden_states)
        hidden_states = self.feed_forward(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = residual + self.ff_scale * hidden_states
        if not self.normalize_before:
            hidden_states = self.ff_layer_norm(hidden_states)
        if self.conv_module is not None:
            hidden_states = self.final_layer_norm(hidden_states)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attention_scores,)
        return outputs
```

**class_skeleton:**

```python
class FastSpeech2ConformerEncoderLayer(nn.Module):
    def __init__(self, config: FastSpeech2ConformerConfig, module_config):
        pass

    def forward(self, hidden_states: torch.Tensor, pos_emb: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[torch.Tensor]=False):
        '''
        Compute encoded features.
        Args:
            hidden_states (`torch.Tensor` of shape `(batch, time, size)`): Input tensor.
            pos_emb (`torch.Tensor` of shape `(1, time, size)`): Positional embeddings tensor.
            attention_mask (`torch.Tensor` of shape `(batch, time)`): Attention mask tensor for the input.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        Returns:
            `torch.Tensor`: Output tensor of shape `(batch, time, size)`.
        '''
        pass
```

**Metrics:** total_program_units=3, total_doc_str=1, AvgCountLine=57, AvgCountLineBlank=9, AvgCountLineCode=38, AvgCountLineComment=10, AvgCyclomatic=9, CommentToCodeRatio=0.25, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=17, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=115, CountLineBlank=19, CountLineCode=77, CountLineCodeDecl=30, CountLineCodeExe=68, CountLineComment=19, CountStmt=67, CountStmtDecl=24, CountStmtExe=64, MaxCyclomatic=14, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=18
---

- **id:** 2,504
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py`
- **class_name:** `transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerHifiGan`

**human_written_code:**

```python
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig
from ...modeling_utils import PreTrainedModel
import torch

@auto_docstring(custom_intro='\n HiFi-GAN vocoder.\n ')
class FastSpeech2ConformerHifiGan(PreTrainedModel):
    config: FastSpeech2ConformerHifiGanConfig
    main_input_name = 'spectrogram'

    def __init__(self, config: FastSpeech2ConformerHifiGanConfig):
        super().__init__(config)
        self.num_kernels = len(config.resblock_kernel_sizes)
        self.num_upsamples = len(config.upsample_rates)
        self.conv_pre = nn.Conv1d(config.model_in_dim, config.upsample_initial_channel, kernel_size=7, stride=1, padding=3)
        self.upsampler = nn.ModuleList()
        for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)):
            self.upsampler.append(nn.ConvTranspose1d(config.upsample_initial_channel // 2 ** i, config.upsample_initial_channel // 2 ** (i + 1), kernel_size=kernel_size, stride=upsample_rate, padding=(kernel_size - upsample_rate) // 2))
        self.resblocks = nn.ModuleList()
        for i in range(len(self.upsampler)):
            channels = config.upsample_initial_channel // 2 ** (i + 1)
            for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes):
                self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope))
        self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3)
        self.register_buffer('mean', torch.zeros(config.model_in_dim))
        self.register_buffer('scale', torch.ones(config.model_in_dim))
        self.post_init()

    def _init_weights(self, module: nn.Module):
        """Initialize the weights."""
        if isinstance(module, (nn.Conv1d, nn.ConvTranspose1d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def apply_weight_norm(self):
        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, 'weight_norm'):
            weight_norm = nn.utils.parametrizations.weight_norm
        weight_norm(self.conv_pre)
        for layer in self.upsampler:
            weight_norm(layer)
        for layer in self.resblocks:
            layer.apply_weight_norm()
        weight_norm(self.conv_post)

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.conv_pre)
        for layer in self.upsampler:
            nn.utils.remove_weight_norm(layer)
        for layer in self.resblocks:
            layer.remove_weight_norm()
        nn.utils.remove_weight_norm(self.conv_post)

    @auto_docstring(custom_intro='\n Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch\n of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech\n waveform.\n ')
    def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor:
        """
        spectrogram (`torch.FloatTensor`):
            Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
            config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`.
        Returns:
            `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
            shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
        """
        if self.config.normalize_before:
            spectrogram = (spectrogram - self.mean) / self.scale
        is_batched = spectrogram.dim() == 3
        if not is_batched:
            spectrogram = spectrogram.unsqueeze(0)
        hidden_states = spectrogram.transpose(2, 1)
        hidden_states = self.conv_pre(hidden_states)
        for i in range(self.num_upsamples):
            hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope)
            hidden_states = self.upsampler[i](hidden_states)
            res_state = self.resblocks[i * self.num_kernels](hidden_states)
            for j in range(1, self.num_kernels):
                res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
            hidden_states = res_state / self.num_kernels
        hidden_states = nn.functional.leaky_relu(hidden_states)
        hidden_states = self.conv_post(hidden_states)
        hidden_states = torch.tanh(hidden_states)
        if not is_batched:
            waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1)
        else:
            waveform = hidden_states.squeeze(1)
        return waveform
```

**class_skeleton:**

```python
@auto_docstring(custom_intro='\n HiFi-GAN vocoder.\n ')
class FastSpeech2ConformerHifiGan(PreTrainedModel):
    def __init__(self, config: FastSpeech2ConformerHifiGanConfig):
        pass

    def _init_weights(self, module: nn.Module):
        '''Initialize the weights.'''
        pass

    def apply_weight_norm(self):
        pass

    def remove_weight_norm(self):
        pass

    @auto_docstring(custom_intro='\n Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch\n of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech\n waveform.\n ')
    def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor:
        '''
        spectrogram (`torch.FloatTensor`):
            Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
            config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`.
        Returns:
            `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
            shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
        '''
        pass
```

**Metrics:** total_program_units=8, total_doc_str=2, AvgCountLine=21, AvgCountLineBlank=3, AvgCountLineCode=15, AvgCountLineComment=3, AvgCyclomatic=4, CommentToCodeRatio=0.2, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=6, CountDeclMethod=5, CountDeclMethodAll=5, CountLine=115, CountLineBlank=20, CountLineCode=79, CountLineCodeDecl=26, CountLineCodeExe=73, CountLineComment=16, CountStmt=64, CountStmtDecl=26, CountStmtExe=58, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=20
---

- **id:** 2,505
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py`
- **class_name:** `transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerLoss`

**human_written_code:**

```python
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig
from torch import nn
import torch

class FastSpeech2ConformerLoss(nn.Module):
    def __init__(self, config: FastSpeech2ConformerConfig):
        super().__init__()
        use_masking = config.use_masking
        use_weighted_masking = config.use_weighted_masking
        if use_masking and use_weighted_masking:
            raise ValueError('Either use_masking or use_weighted_masking can be True, but not both.')
        self.use_masking = use_masking
        self.use_weighted_masking = use_weighted_masking
        reduction = 'none' if self.use_weighted_masking else 'mean'
        self.l1_criterion = nn.L1Loss(reduction=reduction)
        self.mse_criterion = nn.MSELoss(reduction=reduction)
        self.duration_criterion = nn.MSELoss(reduction=reduction)
        self.log_domain_offset = 1.0

    def forward(self, outputs_after_postnet, outputs_before_postnet, duration_outputs, pitch_outputs, energy_outputs, spectrogram_labels, duration_labels, pitch_labels, energy_labels, duration_mask, spectrogram_mask):
        """
        Args:
            outputs_after_postnet (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`):
                Batch of outputs after postnet.
            outputs_before_postnet (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`):
                Batch of outputs before postnet.
            duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length)`):
                Batch of outputs of duration predictor.
            pitch_outputs (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
                Batch of outputs of pitch predictor.
            energy_outputs (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
                Batch of outputs of energy predictor.
            spectrogram_labels (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`):
                Batch of target features.
            duration_labels (`torch.LongTensor` of shape `(batch_size, max_text_length)`): Batch of durations.
            pitch_labels (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
                Batch of target token-averaged pitch.
            energy_labels (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
                Batch of target token-averaged energy.
            duration_mask (`torch.LongTensor`):
                Mask used to discern which values the duration loss should be calculated for.
            spectrogram_mask (`torch.LongTensor`):
                Mask used to discern which values the spectrogam loss should be calculated for.
        Returns:
            `tuple(torch.FloatTensor)`: Tuple of tensors containing, in order, the L1 loss value, duration predictor
            loss value, pitch predictor loss value, and energy predictor loss value.
        """
        pitch_and_energy_masks = duration_mask.unsqueeze(-1)
        if self.use_masking:
            outputs_before_postnet = outputs_before_postnet.masked_select(spectrogram_mask)
            if outputs_after_postnet is not None:
                outputs_after_postnet = outputs_after_postnet.masked_select(spectrogram_mask)
            spectrogram_labels = spectrogram_labels.masked_select(spectrogram_mask)
            duration_outputs = duration_outputs.masked_select(duration_mask)
            duration_labels = duration_labels.masked_select(duration_mask)
            pitch_outputs = pitch_outputs.masked_select(pitch_and_energy_masks)
            energy_outputs = energy_outputs.masked_select(pitch_and_energy_masks)
            pitch_labels = pitch_labels.masked_select(pitch_and_energy_masks)
            energy_labels = energy_labels.masked_select(pitch_and_energy_masks)
        l1_loss = self.l1_criterion(outputs_before_postnet, spectrogram_labels)
        if outputs_after_postnet is not None:
            l1_loss = l1_loss + self.l1_criterion(outputs_after_postnet, spectrogram_labels)
        duration_labels = torch.log(duration_labels.float() + self.log_domain_offset)
        duration_loss = self.duration_criterion(duration_outputs, duration_labels)
        pitch_loss = self.mse_criterion(pitch_outputs, pitch_labels)
        energy_loss = self.mse_criterion(energy_outputs, energy_labels)
        if self.use_weighted_masking:
            spectrogram_mask = nn.functional.pad(spectrogram_mask.transpose(1, 2), [0, spectrogram_labels.size(1) - spectrogram_mask.size(1), 0, 0, 0, 0], value=False).transpose(1, 2)
            out_weights = spectrogram_mask.float() / spectrogram_mask.sum(dim=1, keepdim=True).float()
            out_weights /= spectrogram_labels.size(0) * spectrogram_labels.size(2)
            duration_weights = duration_mask.float() / duration_mask.sum(dim=1, keepdim=True).float()
            duration_weights /= duration_labels.size(0)
            l1_loss = l1_loss.mul(out_weights).masked_select(spectrogram_mask).sum()
            duration_loss = duration_loss.mul(duration_weights).masked_select(duration_mask).sum()
            pitch_weights = duration_weights.unsqueeze(-1)
            pitch_loss = pitch_loss.mul(pitch_weights).masked_select(pitch_and_energy_masks).sum()
            energy_loss = energy_loss.mul(pitch_weights).masked_select(pitch_and_energy_masks).sum()
        return l1_loss + duration_loss + pitch_loss + energy_loss
```

**class_skeleton:**

```python
class FastSpeech2ConformerLoss(nn.Module):
    def __init__(self, config: FastSpeech2ConformerConfig):
        pass

    def forward(self, outputs_after_postnet, outputs_before_postnet, duration_outputs, pitch_outputs, energy_outputs, spectrogram_labels, duration_labels, pitch_labels, energy_labels, duration_mask, spectrogram_mask):
        '''
        Args:
            outputs_after_postnet (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`):
                Batch of outputs after postnet.
            outputs_before_postnet (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`):
                Batch of outputs before postnet.
            duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length)`):
                Batch of outputs of duration predictor.
            pitch_outputs (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
                Batch of outputs of pitch predictor.
            energy_outputs (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
                Batch of outputs of energy predictor.
            spectrogram_labels (`torch.Tensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`):
                Batch of target features.
            duration_labels (`torch.LongTensor` of shape `(batch_size, max_text_length)`): Batch of durations.
            pitch_labels (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
                Batch of target token-averaged pitch.
            energy_labels (`torch.Tensor` of shape `(batch_size, max_text_length, 1)`):
                Batch of target token-averaged energy.
            duration_mask (`torch.LongTensor`):
                Mask used to discern which values the duration loss should be calculated for.
            spectrogram_mask (`torch.LongTensor`):
                Mask used to discern which values the spectrogam loss should be calculated for.
        Returns:
            `tuple(torch.FloatTensor)`: Tuple of tensors containing, in order, the L1 loss value, duration predictor
            loss value, pitch predictor loss value, and energy predictor loss value.
        '''
        pass
```

**Metrics:** total_program_units=3, total_doc_str=1, AvgCountLine=53, AvgCountLineBlank=6, AvgCountLineCode=31, AvgCountLineComment=16, AvgCyclomatic=4, CommentToCodeRatio=0.51, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=6, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=108, CountLineBlank=13, CountLineCode=63, CountLineCodeDecl=33, CountLineCodeExe=47, CountLineComment=32, CountStmt=46, CountStmtDecl=20, CountStmtExe=43, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=8
---

- **id:** 2,506
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py`
- **class_name:** `transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerModel`

**human_written_code:**

````python
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
import torch
from torch import nn
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig

@auto_docstring(custom_intro='\n FastSpeech2Conformer Model.\n ')
class FastSpeech2ConformerModel(FastSpeech2ConformerPreTrainedModel):
    """
    FastSpeech 2 module.
    This is a module of FastSpeech 2 described in 'FastSpeech 2: Fast and High-Quality End-to-End Text to Speech'
    https://huggingface.co/papers/2006.04558. Instead of quantized pitch and energy, we use token-averaged value introduced in
    FastPitch: Parallel Text-to-speech with Pitch Prediction. The encoder and decoder are Conformers instead of regular
    Transformers.
    """

    def __init__(self, config: FastSpeech2ConformerConfig):
        super().__init__(config)
        self.config = config
        self.vocab_size = config.vocab_size
        self.num_mel_bins = config.num_mel_bins
        self.hidden_size = config.hidden_size
        self.reduction_factor = config.reduction_factor
        self.stop_gradient_from_pitch_predictor = config.stop_gradient_from_pitch_predictor
        self.stop_gradient_from_energy_predictor = config.stop_gradient_from_energy_predictor
        self.multilingual_model = config.num_languages is not None and config.num_languages > 1
        if self.multilingual_model:
            self.language_id_embedding = torch.nn.Embedding(config.num_languages, self.hidden_size)
        self.multispeaker_model = config.num_speakers is not None and config.num_speakers > 1
        if self.multispeaker_model:
            self.speaker_id_embedding = torch.nn.Embedding(config.num_speakers, config.hidden_size)
        self.speaker_embed_dim = config.speaker_embed_dim
        if self.speaker_embed_dim:
            self.projection = nn.Linear(config.hidden_size + self.speaker_embed_dim, config.hidden_size)
        self.encoder = FastSpeech2ConformerEncoder(config, config.encoder_config, use_encoder_input_layer=True)
        self.duration_predictor = FastSpeech2ConformerDurationPredictor(config)
        self.pitch_predictor = FastSpeech2ConformerVariancePredictor(config, num_layers=config.pitch_predictor_layers, num_chans=config.pitch_predictor_channels, kernel_size=config.pitch_predictor_kernel_size, dropout_rate=config.pitch_predictor_dropout)
        self.pitch_embed = FastSpeech2ConformerVarianceEmbedding(out_channels=self.hidden_size, kernel_size=config.pitch_embed_kernel_size, padding=(config.pitch_embed_kernel_size - 1) // 2, dropout_rate=config.pitch_embed_dropout)
        self.energy_predictor = FastSpeech2ConformerVariancePredictor(config, num_layers=config.energy_predictor_layers, num_chans=config.energy_predictor_channels, kernel_size=config.energy_predictor_kernel_size, dropout_rate=config.energy_predictor_dropout)
        self.energy_embed = FastSpeech2ConformerVarianceEmbedding(out_channels=self.hidden_size, kernel_size=config.energy_embed_kernel_size, padding=(config.energy_embed_kernel_size - 1) // 2, dropout_rate=config.energy_embed_dropout)
        self.decoder = FastSpeech2ConformerEncoder(config, config.decoder_config, use_encoder_input_layer=False)
        self.speech_decoder_postnet = FastSpeech2ConformerSpeechDecoderPostnet(config)
        self.criterion = FastSpeech2ConformerLoss(config)
        self.post_init()

    @auto_docstring
    def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor]=None, spectrogram_labels: Optional[torch.FloatTensor]=None, duration_labels: Optional[torch.LongTensor]=None, pitch_labels: Optional[torch.FloatTensor]=None, energy_labels: Optional[torch.FloatTensor]=None, speaker_ids: Optional[torch.LongTensor]=None, lang_ids: Optional[torch.LongTensor]=None, speaker_embedding: Optional[torch.FloatTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> Union[tuple, FastSpeech2ConformerModelOutput]:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Input sequence of text vectors.
        spectrogram_labels (`torch.FloatTensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`, *optional*, defaults to `None`):
            Batch of padded target features.
        duration_labels (`torch.LongTensor` of shape `(batch_size, sequence_length + 1)`, *optional*, defaults to `None`):
            Batch of padded durations.
        pitch_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
            Batch of padded token-averaged pitch.
        energy_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
            Batch of padded token-averaged energy.
        speaker_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
            Speaker ids used to condition features of speech output by the model.
        lang_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
            Language ids used to condition features of speech output by the model.
        speaker_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`, *optional*, defaults to `None`):
            Embedding containing conditioning signals for the features of the speech.
        Example:
        ```python
        >>> from transformers import (
        ...     FastSpeech2ConformerTokenizer,
        ...     FastSpeech2ConformerModel,
        ...     FastSpeech2ConformerHifiGan,
        ... )
        >>> tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
        >>> inputs = tokenizer("some text to convert to speech", return_tensors="pt")
        >>> input_ids = inputs["input_ids"]
        >>> model = FastSpeech2ConformerModel.from_pretrained("espnet/fastspeech2_conformer")
        >>> output_dict = model(input_ids, return_dict=True)
        >>> spectrogram = output_dict["spectrogram"]
        >>> vocoder = FastSpeech2ConformerHifiGan.from_pretrained("espnet/fastspeech2_conformer_hifigan")
        >>> waveform = vocoder(spectrogram)
        >>> print(waveform.shape)
        torch.Size([1, 49664])
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        if attention_mask is None:
            attention_mask = torch.ones(input_ids.shape, device=input_ids.device)
        has_missing_labels = spectrogram_labels is None or duration_labels is None or pitch_labels is None or (energy_labels is None)
        if self.training and has_missing_labels:
            raise ValueError('All labels must be provided to run in training mode.')
        text_masks = attention_mask.unsqueeze(-2)
        encoder_outputs = self.encoder(input_ids, text_masks, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict)
        hidden_states = encoder_outputs[0]
        if self.multispeaker_model and speaker_ids is not None:
            speaker_id_embeddings = self.speaker_id_embedding(speaker_ids.view(-1))
            hidden_states = hidden_states + speaker_id_embeddings.unsqueeze(1)
        if self.multilingual_model and lang_ids is not None:
            language_id_embbedings = self.language_id_embedding(lang_ids.view(-1))
            hidden_states = hidden_states + language_id_embbedings.unsqueeze(1)
        if self.speaker_embed_dim is not None and speaker_embedding is not None:
            embeddings_expanded = nn.functional.normalize(speaker_embedding).unsqueeze(1).expand(-1, hidden_states.size(1), -1)
            hidden_states = self.projection(torch.cat([hidden_states, embeddings_expanded], dim=-1))
        duration_mask = ~attention_mask.bool()
        if self.stop_gradient_from_pitch_predictor:
            pitch_predictions = self.pitch_predictor(hidden_states.detach(), duration_mask.unsqueeze(-1))
        else:
            pitch_predictions = self.pitch_predictor(hidden_states, duration_mask.unsqueeze(-1))
        if self.stop_gradient_from_energy_predictor:
            energy_predictions = self.energy_predictor(hidden_states.detach(), duration_mask.unsqueeze(-1))
        else:
            energy_predictions = self.energy_predictor(hidden_states, duration_mask.unsqueeze(-1))
        duration_predictions = self.duration_predictor(hidden_states)
        duration_predictions = duration_predictions.masked_fill(duration_mask, 0.0)
        if not self.training:
            embedded_pitch_curve = self.pitch_embed(pitch_predictions)
            embedded_energy_curve = self.energy_embed(energy_predictions)
            hidden_states = hidden_states + embedded_energy_curve + embedded_pitch_curve
            hidden_states = length_regulator(hidden_states, duration_predictions, self.config.speaking_speed)
        else:
            embedded_pitch_curve = self.pitch_embed(pitch_labels)
            embedded_energy_curve = self.energy_embed(energy_labels)
            hidden_states = hidden_states + embedded_energy_curve + embedded_pitch_curve
            hidden_states = length_regulator(hidden_states, duration_labels)
        if not self.training:
            hidden_mask = None
        else:
            spectrogram_mask = (spectrogram_labels != -100).any(dim=-1)
            spectrogram_mask = spectrogram_mask.int()
            if self.reduction_factor > 1:
                length_dim = spectrogram_mask.shape[1] - spectrogram_mask.shape[1] % self.reduction_factor
                spectrogram_mask = spectrogram_mask[:, :, :length_dim]
            hidden_mask = spectrogram_mask.unsqueeze(-2)
        decoder_outputs = self.decoder(hidden_states, hidden_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict)
        outputs_before_postnet, outputs_after_postnet = self.speech_decoder_postnet(decoder_outputs[0])
        loss = None
        if self.training:
            loss_duration_mask = ~duration_mask
            loss_spectrogram_mask = spectrogram_mask.unsqueeze(-1).bool()
            loss = self.criterion(outputs_after_postnet=outputs_after_postnet, outputs_before_postnet=outputs_before_postnet, duration_outputs=duration_predictions, pitch_outputs=pitch_predictions, energy_outputs=energy_predictions, spectrogram_labels=spectrogram_labels, duration_labels=duration_labels, pitch_labels=pitch_labels, energy_labels=energy_labels, duration_mask=loss_duration_mask, spectrogram_mask=loss_spectrogram_mask)
        if not return_dict:
            postnet_outputs = (outputs_after_postnet,)
            audio_feature_predictions = (duration_predictions, pitch_predictions, energy_predictions)
            outputs = postnet_outputs + encoder_outputs + decoder_outputs[1:] + audio_feature_predictions
            return (loss,) + outputs if loss is not None else outputs
        return FastSpeech2ConformerModelOutput(loss=loss, spectrogram=outputs_after_postnet, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, duration_outputs=duration_predictions, pitch_outputs=pitch_predictions, energy_outputs=energy_predictions)
````

**class_skeleton:**

````python
@auto_docstring(custom_intro='\n FastSpeech2Conformer Model.\n ')
class FastSpeech2ConformerModel(FastSpeech2ConformerPreTrainedModel):
    '''
    FastSpeech 2 module.
    This is a module of FastSpeech 2 described in 'FastSpeech 2: Fast and High-Quality End-to-End Text to Speech'
    https://huggingface.co/papers/2006.04558. Instead of quantized pitch and energy, we use token-averaged value introduced in
    FastPitch: Parallel Text-to-speech with Pitch Prediction. The encoder and decoder are Conformers instead of regular
    Transformers.
    '''

    def __init__(self, config: FastSpeech2ConformerConfig):
        pass

    @auto_docstring
    def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor]=None, spectrogram_labels: Optional[torch.FloatTensor]=None, duration_labels: Optional[torch.LongTensor]=None, pitch_labels: Optional[torch.FloatTensor]=None, energy_labels: Optional[torch.FloatTensor]=None, speaker_ids: Optional[torch.LongTensor]=None, lang_ids: Optional[torch.LongTensor]=None, speaker_embedding: Optional[torch.FloatTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> Union[tuple, FastSpeech2ConformerModelOutput]:
        '''
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Input sequence of text vectors.
        spectrogram_labels (`torch.FloatTensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`, *optional*, defaults to `None`):
            Batch of padded target features.
        duration_labels (`torch.LongTensor` of shape `(batch_size, sequence_length + 1)`, *optional*, defaults to `None`):
            Batch of padded durations.
        pitch_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
            Batch of padded token-averaged pitch.
        energy_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
            Batch of padded token-averaged energy.
        speaker_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
            Speaker ids used to condition features of speech output by the model.
        lang_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
            Language ids used to condition features of speech output by the model.
        speaker_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`, *optional*, defaults to `None`):
            Embedding containing conditioning signals for the features of the speech.
        Example:
        ```python
        >>> from transformers import (
        ...     FastSpeech2ConformerTokenizer,
        ...     FastSpeech2ConformerModel,
        ...     FastSpeech2ConformerHifiGan,
        ... )
        >>> tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
        >>> inputs = tokenizer("some text to convert to speech", return_tensors="pt")
        >>> input_ids = inputs["input_ids"]
        >>> model = FastSpeech2ConformerModel.from_pretrained("espnet/fastspeech2_conformer")
        >>> output_dict = model(input_ids, return_dict=True)
        >>> spectrogram = output_dict["spectrogram"]
        >>> vocoder = FastSpeech2ConformerHifiGan.from_pretrained("espnet/fastspeech2_conformer_hifigan")
        >>> waveform = vocoder(spectrogram)
        >>> print(waveform.shape)
        torch.Size([1, 49664])
        ```
        '''
        pass
````

**Metrics:** total_program_units=5, total_doc_str=2, AvgCountLine=135, AvgCountLineBlank=18, AvgCountLineCode=87, AvgCountLineComment=30, AvgCyclomatic=11, CommentToCodeRatio=0.38, CountClassBase=1, CountClassCoupled=11, CountClassCoupledModified=8, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=22, CountDeclMethod=2, CountDeclMethodAll=4, CountLine=281, CountLineBlank=39, CountLineCode=175, CountLineCodeDecl=64, CountLineCodeExe=157, CountLineComment=67, CountStmt=89, CountStmtDecl=49, CountStmtExe=86, MaxCyclomatic=17, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=21
---

- **id:** 2,507
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py`
- **class_name:** `transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerModelOutput`

**human_written_code:**

```python
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
from dataclasses import dataclass

@dataclass
@auto_docstring(custom_intro='\n Output type of [`FastSpeech2ConformerModel`].\n ')
class FastSpeech2ConformerModelOutput(ModelOutput):
    """
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Spectrogram generation loss.
    duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*):
        Outputs of the duration predictor.
    pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
        Outputs of the pitch predictor.
    energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
        Outputs of the energy predictor.
    """

    loss: Optional[torch.FloatTensor] = None
    spectrogram: Optional[torch.FloatTensor] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
    decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
    duration_outputs: Optional[torch.LongTensor] = None
    pitch_outputs: Optional[torch.FloatTensor] = None
    energy_outputs: Optional[torch.FloatTensor] = None
```

**class_skeleton:**

```python
@dataclass
@auto_docstring(custom_intro='\n Output type of [`FastSpeech2ConformerModel`].\n ')
class FastSpeech2ConformerModelOutput(ModelOutput):
    '''
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Spectrogram generation loss.
    duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*):
        Outputs of the duration predictor.
    pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
        Outputs of the pitch predictor.
    energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
        Outputs of the energy predictor.
    '''
    pass
```

**Metrics:** total_program_units=3, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=3.09, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=1, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=52, CountLineBlank=7, CountLineCode=11, CountLineCodeDecl=11, CountLineCodeExe=10, CountLineComment=34, CountStmt=11, CountStmtDecl=11, CountStmtExe=10, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0
---

- **id:** 2,508
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py`
- **class_name:** `transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerMultiLayeredConv1d`

**human_written_code:**

```python
import torch
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig
from torch import nn

class FastSpeech2ConformerMultiLayeredConv1d(nn.Module):
    """
    Multi-layered conv1d for Transformer block.
    This is a module of multi-layered conv1d designed to replace positionwise feed-forward network in Transformer
    block, which is introduced in 'FastSpeech: Fast, Robust and Controllable Text to Speech'
    https://huggingface.co/papers/1905.09263
    """

    def __init__(self, config: FastSpeech2ConformerConfig, module_config):
        """
        Initialize FastSpeech2ConformerMultiLayeredConv1d module.
        Args:
            input_channels (`int`): Number of input channels.
            hidden_channels (`int`): Number of hidden channels.
            kernel_size (`int`): Kernel size of conv1d.
            dropout_rate (`float`): Dropout rate.
        """
        super().__init__()
        input_channels = config.hidden_size
        hidden_channels = module_config['linear_units']
        kernel_size = config.positionwise_conv_kernel_size
        self.conv1 = nn.Conv1d(input_channels, hidden_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2)
        self.conv2 = nn.Conv1d(hidden_channels, input_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2)
        self.dropout = nn.Dropout(module_config['dropout_rate'])

    def forward(self, hidden_states):
        """
        Calculate forward propagation.
        Args:
            hidden_states (torch.Tensor): Batch of input tensors (batch_size, time, input_channels).
        Returns:
            torch.Tensor: Batch of output tensors (batch_size, time, hidden_channels).
        """
        hidden_states = hidden_states.transpose(-1, 1)
        hidden_states = self.conv1(hidden_states)
        hidden_states = torch.relu(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = hidden_states.transpose(-1, 1)
        return hidden_states
```

**class_skeleton:**

```python
class FastSpeech2ConformerMultiLayeredConv1d(nn.Module):
    '''
    Multi-layered conv1d for Transformer block.
    This is a module of multi-layered conv1d designed to replace positionwise feed-forward network in Transformer
    block, which is introduced in 'FastSpeech: Fast, Robust and Controllable Text to Speech'
    https://huggingface.co/papers/1905.09263
    '''

    def __init__(self, config: FastSpeech2ConformerConfig, module_config):
        '''
        Initialize FastSpeech2ConformerMultiLayeredConv1d module.
        Args:
            input_channels (`int`): Number of input channels.
            hidden_channels (`int`): Number of hidden channels.
            kernel_size (`int`): Kernel size of conv1d.
            dropout_rate (`float`): Dropout rate.
        '''
        pass

    def forward(self, hidden_states):
        '''
        Calculate forward propagation.
        Args:
            hidden_states (torch.Tensor): Batch of input tensors (batch_size, time, input_channels).
        Returns:
            torch.Tensor: Batch of output tensors (batch_size, time, hidden_channels).
        '''
        pass
```

**Metrics:** total_program_units=3, total_doc_str=3, AvgCountLine=17, AvgCountLineBlank=2, AvgCountLineCode=8, AvgCountLineComment=8, AvgCyclomatic=1, CommentToCodeRatio=1.24, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=44, CountLineBlank=6, CountLineCode=17, CountLineCodeDecl=9, CountLineCodeExe=14, CountLineComment=21, CountStmt=17, CountStmtDecl=9, CountStmtExe=14, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
---

- **id:** 2,509
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py`
- **class_name:** `transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerPreTrainedModel`

**human_written_code:**

```python
from torch import nn
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, logging
import math

@auto_docstring
class FastSpeech2ConformerPreTrainedModel(PreTrainedModel):
    config: FastSpeech2ConformerConfig
    base_model_prefix = 'fastspeech2_conformer'
    main_input_name = 'input_ids'

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, std=1.0 / math.sqrt(module.weight.size(1)))
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Conv1d):
            nn.init.kaiming_normal_(module.weight)
            if module.bias is not None:
                key = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
                nn.init.uniform_(module.bias, a=-key, b=key)
        elif isinstance(module, (nn.LayerNorm, nn.BatchNorm1d)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_()
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, FastSpeech2ConformerAttention):
            nn.init.xavier_uniform_(module.pos_bias_u)
            nn.init.xavier_uniform_(module.pos_bias_v)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, FastSpeech2ConformerEncoder):
            module.gradient_checkpointing = value
```

**class_skeleton:**

```python
@auto_docstring
class FastSpeech2ConformerPreTrainedModel(PreTrainedModel):
    def _init_weights(self, module):
        '''Initialize the weights'''
        pass

    def _set_gradient_checkpointing(self, module, value=False):
        pass
```

**Metrics:** total_program_units=4, total_doc_str=1, AvgCountLine=10, AvgCountLineBlank=0, AvgCountLineCode=10, AvgCountLineComment=1, AvgCyclomatic=5, CommentToCodeRatio=0.22, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=2, CountClassDerived=1, CountDeclInstanceMethod=2, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=2, CountLine=32, CountLineBlank=4, CountLineCode=23, CountLineCodeDecl=7, CountLineCodeExe=20, CountLineComment=5, CountStmt=20, CountStmtDecl=7, CountStmtExe=17, MaxCyclomatic=7, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=9
---

- **id:** 2,510
- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** `huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py`
- **class_name:** `transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerPredictorLayer`

**human_written_code:**

```python
from torch import nn

class FastSpeech2ConformerPredictorLayer(nn.Module):
    def __init__(self, input_channels, num_chans, kernel_size, dropout_rate):
        super().__init__()
        self.conv = nn.Conv1d(input_channels, num_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2)
        self.activation = nn.ReLU()
        self.layer_norm = nn.LayerNorm(num_chans)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = hidden_states.transpose(1, -1)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = hidden_states.transpose(1, -1)
        hidden_states = self.dropout(hidden_states)
        return hidden_states
```

**class_skeleton:**

```python
class FastSpeech2ConformerPredictorLayer(nn.Module):
    def __init__(self, input_channels, num_chans, kernel_size, dropout_rate):
        pass

    def forward(self, hidden_states):
        pass
```

**Metrics:** total_program_units=3, total_doc_str=0, AvgCountLine=12, AvgCountLineBlank=2, AvgCountLineCode=10, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.05, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=26, CountLineBlank=4, CountLineCode=21, CountLineCodeDecl=7, CountLineCodeExe=18, CountLineComment=1, CountStmt=15, CountStmtDecl=7, CountStmtExe=12, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
2,511
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
|
transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerRelPositionalEncoding
|
from torch import nn
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig
import torch
import math
class FastSpeech2ConformerRelPositionalEncoding(nn.Module):
"""
Args:
Relative positional encoding module (new implementation). Details can be found in
https://github.com/espnet/espnet/pull/2816. See : Appendix Batch in https://huggingface.co/papers/1901.02860
config (`FastSpeech2ConformerConfig`):
FastSpeech2ConformerConfig instance.
module_config (`dict`):
Dictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`.
"""
def __init__(self, config: FastSpeech2ConformerConfig, module_config):
"""
Construct an PositionalEncoding object.
"""
super().__init__()
self.embed_dim = config.hidden_size
self.input_scale = math.sqrt(self.embed_dim)
self.dropout = nn.Dropout(p=module_config['positional_dropout_rate'])
self.pos_enc = None
self.max_len = 5000
self.extend_pos_enc(torch.tensor(0.0).expand(1, self.max_len))
def extend_pos_enc(self, x):
"""Reset the positional encodings."""
if self.pos_enc is not None:
if self.pos_enc.size(1) >= x.size(1) * 2 - 1:
if self.pos_enc.dtype != x.dtype or self.pos_enc.device != x.device:
self.pos_enc = self.pos_enc.to(dtype=x.dtype, device=x.device)
return
pos_enc_positive = torch.zeros(x.size(1), self.embed_dim)
pos_enc_negative = torch.zeros(x.size(1), self.embed_dim)
position = torch.arange(0, x.size(1), dtype=torch.int64).float().unsqueeze(1)
div_term = torch.exp(torch.arange(0, self.embed_dim, 2, dtype=torch.int64).float() * -(math.log(10000.0) / self.embed_dim))
pos_enc_positive[:, 0::2] = torch.sin(position * div_term)
pos_enc_positive[:, 1::2] = torch.cos(position * div_term)
pos_enc_negative[:, 0::2] = torch.sin(-1 * position * div_term)
pos_enc_negative[:, 1::2] = torch.cos(-1 * position * div_term)
pos_enc_positive = torch.flip(pos_enc_positive, [0]).unsqueeze(0)
pos_enc_negative = pos_enc_negative[1:].unsqueeze(0)
pos_enc = torch.cat([pos_enc_positive, pos_enc_negative], dim=1)
self.pos_enc = pos_enc.to(device=x.device, dtype=x.dtype)
def forward(self, feature_representation):
"""
Args:
feature_representation (`torch.Tensor` of shape (batch_size, time, `*`)):
Input tensor.
Returns:
`torch.Tensor`: Encoded tensor (batch_size, time, `*`).
"""
self.extend_pos_enc(feature_representation)
hidden_states = feature_representation * self.input_scale
center_idx = self.pos_enc.size(1) // 2
pos_emb = self.pos_enc[:, center_idx - hidden_states.size(1) + 1:center_idx + hidden_states.size(1)]
return (self.dropout(hidden_states), self.dropout(pos_emb))
|
class FastSpeech2ConformerRelPositionalEncoding(nn.Module):
    '''
    Relative positional encoding module (new implementation). Details can be found in
    https://github.com/espnet/espnet/pull/2816. See Appendix B in https://huggingface.co/papers/1901.02860.
    Args:
        config (`FastSpeech2ConformerConfig`):
            FastSpeech2ConformerConfig instance.
        module_config (`dict`):
            Dictionary containing the encoder or decoder module configuration from the `FastSpeech2ConformerConfig`.
    '''
def __init__(self, config: FastSpeech2ConformerConfig, module_config):
'''
        Construct a PositionalEncoding object.
'''
pass
def extend_pos_enc(self, x):
'''Reset the positional encodings.'''
pass
def forward(self, feature_representation):
'''
Args:
feature_representation (`torch.Tensor` of shape (batch_size, time, `*`)):
Input tensor.
Returns:
`torch.Tensor`: Encoded tensor (batch_size, time, `*`).
'''
pass
| 4
| 4
| 18
| 1
| 11
| 6
| 2
| 0.8
| 1
| 2
| 1
| 0
| 3
| 5
| 3
| 13
| 68
| 5
| 35
| 17
| 31
| 28
| 33
| 17
| 29
| 4
| 1
| 3
| 6
|
2,512
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
|
transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerSpeechDecoderPostnet
|
from torch import nn
import torch
class FastSpeech2ConformerSpeechDecoderPostnet(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.feat_out = nn.Linear(config.hidden_size, config.num_mel_bins * config.reduction_factor)
self.layers = nn.ModuleList([FastSpeech2ConformerBatchNormConvLayer(config, i) for i in range(config.speech_decoder_postnet_layers)])
def forward(self, hidden_states: torch.Tensor):
outputs_before_postnet = self.feat_out(hidden_states).view(hidden_states.size(0), -1, self.config.num_mel_bins)
layer_output = outputs_before_postnet.transpose(1, 2)
for layer in self.layers:
layer_output = layer(layer_output)
outputs_after_postnet = outputs_before_postnet + layer_output.transpose(1, 2)
return (outputs_before_postnet, outputs_after_postnet)
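A sketch of the postnet's residual refinement: decoder states are projected to `num_mel_bins * reduction_factor` values per frame, reshaped into a mel spectrogram, and the postnet output is added back as a residual. The single `Conv1d` here is a toy stand-in for the batch-norm conv stack, and the sizes are assumptions:
```python
import torch
from torch import nn

hidden_size, num_mel_bins, reduction_factor = 384, 80, 1
feat_out = nn.Linear(hidden_size, num_mel_bins * reduction_factor)
postnet = nn.Conv1d(num_mel_bins, num_mel_bins, kernel_size=5, padding=2)

decoder_states = torch.randn(2, 100, hidden_size)            # (batch, frames, hidden)
before = feat_out(decoder_states).view(2, -1, num_mel_bins)  # (batch, frames*r, mel)
# Residual add: the postnet only predicts a correction to the coarse spectrogram.
after = before + postnet(before.transpose(1, 2)).transpose(1, 2)
print(before.shape, after.shape)  # both torch.Size([2, 100, 80])
```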
|
class FastSpeech2ConformerSpeechDecoderPostnet(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor):
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 2
| 0
| 1
| 4
| 1
| 0
| 2
| 3
| 2
| 12
| 16
| 1
| 15
| 10
| 12
| 0
| 13
| 10
| 10
| 2
| 1
| 1
| 3
|
2,513
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
|
transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerVarianceEmbedding
|
from torch import nn
class FastSpeech2ConformerVarianceEmbedding(nn.Module):
def __init__(self, in_channels=1, out_channels=384, kernel_size=1, padding=0, dropout_rate=0.0):
super().__init__()
self.conv = nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.conv(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
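The module above is a pointwise projection of a scalar variance track (pitch or energy) into the hidden size. A self-contained sketch with assumed shapes:
```python
import torch
from torch import nn

# One scalar per frame, projected to the model width with a 1x1 convolution.
conv = nn.Conv1d(in_channels=1, out_channels=384, kernel_size=1)
pitch = torch.randn(2, 60, 1)  # (batch, time, 1)

embedded = conv(pitch.transpose(1, 2)).transpose(1, 2)
print(embedded.shape)  # torch.Size([2, 60, 384])
```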
|
class FastSpeech2ConformerVarianceEmbedding(nn.Module):
def __init__(self, in_channels=1, out_channels=384, kernel_size=1, padding=0, dropout_rate=0.0):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 11
| 0
| 11
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 24
| 1
| 23
| 12
| 13
| 0
| 11
| 5
| 8
| 1
| 1
| 0
| 2
|
2,514
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
|
transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerVariancePredictor
|
from torch import nn
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig
class FastSpeech2ConformerVariancePredictor(nn.Module):
def __init__(self, config: FastSpeech2ConformerConfig, num_layers=2, num_chans=384, kernel_size=3, dropout_rate=0.5):
"""
Initialize variance predictor module.
Args:
    config (`FastSpeech2ConformerConfig`): Configuration that provides the input dimension via `config.hidden_size`.
num_layers (`int`, *optional*, defaults to 2): Number of convolutional layers.
num_chans (`int`, *optional*, defaults to 384): Number of channels of convolutional layers.
kernel_size (`int`, *optional*, defaults to 3): Kernel size of convolutional layers.
dropout_rate (`float`, *optional*, defaults to 0.5): Dropout rate.
"""
super().__init__()
self.conv_layers = nn.ModuleList()
for idx in range(num_layers):
input_channels = config.hidden_size if idx == 0 else num_chans
layer = FastSpeech2ConformerPredictorLayer(input_channels, num_chans, kernel_size, dropout_rate)
self.conv_layers.append(layer)
self.linear = nn.Linear(num_chans, 1)
def forward(self, encoder_hidden_states, padding_masks=None):
"""
Calculate forward propagation.
Args:
encoder_hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
Batch of input sequences.
padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):
Batch of masks indicating padded part.
Returns:
Tensor: Batch of predicted sequences `(batch_size, max_text_length, 1)`.
"""
hidden_states = encoder_hidden_states.transpose(1, -1)
for layer in self.conv_layers:
hidden_states = layer(hidden_states)
hidden_states = self.linear(hidden_states.transpose(1, 2))
if padding_masks is not None:
hidden_states = hidden_states.masked_fill(padding_masks, 0.0)
return hidden_states
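The `masked_fill` step at the end of `forward` zeroes predictions on padded frames. A small sketch of that masking (mask layout is an illustrative assumption):
```python
import torch

predictions = torch.randn(2, 4, 1)                  # (batch, max_text_length, 1)
padding_masks = torch.tensor([[0, 0, 1, 1],          # 1 marks a padded position
                              [0, 0, 0, 1]], dtype=torch.bool).unsqueeze(-1)

predictions = predictions.masked_fill(padding_masks, 0.0)
print(predictions.squeeze(-1))  # padded positions are exactly 0.0
```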
|
class FastSpeech2ConformerVariancePredictor(nn.Module):
def __init__(self, config: FastSpeech2ConformerConfig, num_layers=2, num_chans=384, kernel_size=3, dropout_rate=0.5):
'''
Initialize variance predictor module.
Args:
    config (`FastSpeech2ConformerConfig`): Configuration that provides the input dimension via `config.hidden_size`.
num_layers (`int`, *optional*, defaults to 2): Number of convolutional layers.
num_chans (`int`, *optional*, defaults to 384): Number of channels of convolutional layers.
kernel_size (`int`, *optional*, defaults to 3): Kernel size of convolutional layers.
dropout_rate (`float`, *optional*, defaults to 0.5): Dropout rate.
'''
pass
def forward(self, encoder_hidden_states, padding_masks=None):
'''
Calculate forward propagation.
Args:
encoder_hidden_states (`torch.Tensor` of shape `(batch_size, max_text_length, input_dim)`):
Batch of input sequences.
padding_masks (`torch.ByteTensor` of shape `(batch_size, max_text_length)`, *optional*):
Batch of masks indicating padded part.
Returns:
Tensor: Batch of predicted sequences `(batch_size, max_text_length, 1)`.
'''
pass
| 3
| 2
| 25
| 3
| 12
| 10
| 3
| 0.83
| 1
| 4
| 2
| 0
| 2
| 2
| 2
| 12
| 51
| 7
| 24
| 17
| 14
| 20
| 17
| 10
| 14
| 3
| 1
| 1
| 6
|
2,515
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
|
transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerWithHifiGan
|
from .configuration_fastspeech2_conformer import FastSpeech2ConformerConfig, FastSpeech2ConformerHifiGanConfig, FastSpeech2ConformerWithHifiGanConfig
from ...utils import ModelOutput, auto_docstring, logging
from typing import Optional, Union
from ...modeling_utils import PreTrainedModel
import torch
@auto_docstring(custom_intro='\n The FastSpeech2ConformerModel with a FastSpeech2ConformerHifiGan vocoder head that performs text-to-speech (waveform).\n ')
class FastSpeech2ConformerWithHifiGan(PreTrainedModel):
config: FastSpeech2ConformerWithHifiGanConfig
def __init__(self, config: FastSpeech2ConformerWithHifiGanConfig):
super().__init__(config)
self.model = FastSpeech2ConformerModel(config.model_config)
self.vocoder = FastSpeech2ConformerHifiGan(config.vocoder_config)
self.config = config
@auto_docstring
def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor]=None, spectrogram_labels: Optional[torch.FloatTensor]=None, duration_labels: Optional[torch.LongTensor]=None, pitch_labels: Optional[torch.FloatTensor]=None, energy_labels: Optional[torch.FloatTensor]=None, speaker_ids: Optional[torch.LongTensor]=None, lang_ids: Optional[torch.LongTensor]=None, speaker_embedding: Optional[torch.FloatTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> Union[tuple, FastSpeech2ConformerModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Input sequence of text vectors.
spectrogram_labels (`torch.FloatTensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`, *optional*, defaults to `None`):
Batch of padded target features.
duration_labels (`torch.LongTensor` of shape `(batch_size, sequence_length + 1)`, *optional*, defaults to `None`):
Batch of padded durations.
pitch_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
Batch of padded token-averaged pitch.
energy_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
Batch of padded token-averaged energy.
speaker_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
Speaker ids used to condition features of speech output by the model.
lang_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
Language ids used to condition features of speech output by the model.
speaker_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`, *optional*, defaults to `None`):
Embedding containing conditioning signals for the features of the speech.
Example:
```python
>>> from transformers import (
... FastSpeech2ConformerTokenizer,
... FastSpeech2ConformerWithHifiGan,
... )
>>> tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
>>> inputs = tokenizer("some text to convert to speech", return_tensors="pt")
>>> input_ids = inputs["input_ids"]
>>> model = FastSpeech2ConformerWithHifiGan.from_pretrained("espnet/fastspeech2_conformer_with_hifigan")
>>> output_dict = model(input_ids, return_dict=True)
>>> waveform = output_dict["waveform"]
>>> print(waveform.shape)
torch.Size([1, 49664])
```
"""
return_dict = return_dict if return_dict is not None else self.config.model_config.use_return_dict
output_attentions = output_attentions if output_attentions is not None else self.config.model_config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.model_config.output_hidden_states
model_outputs = self.model(input_ids, attention_mask, spectrogram_labels=spectrogram_labels, duration_labels=duration_labels, pitch_labels=pitch_labels, energy_labels=energy_labels, speaker_ids=speaker_ids, lang_ids=lang_ids, speaker_embedding=speaker_embedding, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states)
if not return_dict:
has_missing_labels = spectrogram_labels is None or duration_labels is None or pitch_labels is None or (energy_labels is None)
if has_missing_labels:
spectrogram = model_outputs[0]
else:
spectrogram = model_outputs[1]
else:
spectrogram = model_outputs['spectrogram']
waveform = self.vocoder(spectrogram)
if not return_dict:
return model_outputs + (waveform,)
return FastSpeech2ConformerWithHifiGanOutput(waveform=waveform, **model_outputs)
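When `return_dict=False`, the spectrogram's position in the output tuple depends on whether a loss was computed. A sketch of that branch; the index shift (loss prepended at position 0 when all labels are present) is an assumption inferred from the code above:
```python
def pick_spectrogram(model_outputs, spectrogram_labels, duration_labels,
                     pitch_labels, energy_labels):
    # Mirrors the has_missing_labels check in forward(); assumed layout:
    # index 0 is the spectrogram unless a loss was prepended to the tuple.
    has_missing_labels = (
        spectrogram_labels is None
        or duration_labels is None
        or pitch_labels is None
        or energy_labels is None
    )
    return model_outputs[0] if has_missing_labels else model_outputs[1]

# Inference call without labels: the spectrogram sits at index 0.
print(pick_spectrogram(("spec", "extra"), None, None, None, None))  # 'spec'
```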
|
@auto_docstring(custom_intro='\n The FastSpeech2ConformerModel with a FastSpeech2ConformerHifiGan vocoder head that performs text-to-speech (waveform).\n ')
class FastSpeech2ConformerWithHifiGan(PreTrainedModel):
def __init__(self, config: FastSpeech2ConformerWithHifiGanConfig):
pass
@auto_docstring
def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor]=None, spectrogram_labels: Optional[torch.FloatTensor]=None, duration_labels: Optional[torch.LongTensor]=None, pitch_labels: Optional[torch.FloatTensor]=None, energy_labels: Optional[torch.FloatTensor]=None, speaker_ids: Optional[torch.LongTensor]=None, lang_ids: Optional[torch.LongTensor]=None, speaker_embedding: Optional[torch.FloatTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> Union[tuple, FastSpeech2ConformerModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Input sequence of text vectors.
spectrogram_labels (`torch.FloatTensor` of shape `(batch_size, max_spectrogram_length, num_mel_bins)`, *optional*, defaults to `None`):
Batch of padded target features.
duration_labels (`torch.LongTensor` of shape `(batch_size, sequence_length + 1)`, *optional*, defaults to `None`):
Batch of padded durations.
pitch_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
Batch of padded token-averaged pitch.
energy_labels (`torch.FloatTensor` of shape `(batch_size, sequence_length + 1, 1)`, *optional*, defaults to `None`):
Batch of padded token-averaged energy.
speaker_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
Speaker ids used to condition features of speech output by the model.
lang_ids (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*, defaults to `None`):
Language ids used to condition features of speech output by the model.
speaker_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`, *optional*, defaults to `None`):
Embedding containing conditioning signals for the features of the speech.
Example:
```python
>>> from transformers import (
... FastSpeech2ConformerTokenizer,
... FastSpeech2ConformerWithHifiGan,
... )
>>> tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
>>> inputs = tokenizer("some text to convert to speech", return_tensors="pt")
>>> input_ids = inputs["input_ids"]
>>> model = FastSpeech2ConformerWithHifiGan.from_pretrained("espnet/fastspeech2_conformer_with_hifigan")
>>> output_dict = model(input_ids, return_dict=True)
>>> waveform = output_dict["waveform"]
>>> print(waveform.shape)
torch.Size([1, 49664])
```
'''
pass
| 5
| 1
| 56
| 6
| 28
| 23
| 4
| 0.77
| 1
| 7
| 5
| 0
| 2
| 3
| 2
| 2
| 119
| 13
| 60
| 28
| 40
| 46
| 22
| 11
| 19
| 7
| 1
| 2
| 8
|
2,516
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
|
transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer.FastSpeech2ConformerWithHifiGanOutput
|
from typing import Optional, Union
from dataclasses import dataclass
import torch
from ...utils import ModelOutput, auto_docstring, logging
@dataclass
@auto_docstring(custom_intro='\n Output type of [`FastSpeech2ConformerWithHifiGan`].\n ')
class FastSpeech2ConformerWithHifiGanOutput(FastSpeech2ConformerModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Spectrogram generation loss.
duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*):
Outputs of the duration predictor.
pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
Outputs of the pitch predictor.
energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
Outputs of the energy predictor.
waveform (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Speech output as a result of passing the predicted mel spectrogram through the vocoder.
"""
waveform: Optional[torch.FloatTensor] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`FastSpeech2ConformerWithHifiGan`].\n ')
class FastSpeech2ConformerWithHifiGanOutput(FastSpeech2ConformerModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Spectrogram generation loss.
duration_outputs (`torch.LongTensor` of shape `(batch_size, max_text_length + 1)`, *optional*):
Outputs of the duration predictor.
pitch_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
Outputs of the pitch predictor.
energy_outputs (`torch.FloatTensor` of shape `(batch_size, max_text_length + 1, 1)`, *optional*):
Outputs of the energy predictor.
waveform (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
Speech output as a result of passing the predicted mel spectrogram through the vocoder.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 18
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 44
| 6
| 2
| 2
| 1
| 36
| 2
| 2
| 1
| 0
| 2
| 0
| 0
|
2,517
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py
|
transformers.models.fastspeech2_conformer.tokenization_fastspeech2_conformer.FastSpeech2ConformerTokenizer
|
from typing import Optional
import regex
from ...utils import logging, requires_backends
from ...tokenization_utils import PreTrainedTokenizer
import json
import os
class FastSpeech2ConformerTokenizer(PreTrainedTokenizer):
"""
Construct a FastSpeech2Conformer tokenizer.
Args:
vocab_file (`str`):
Path to the vocabulary file.
bos_token (`str`, *optional*, defaults to `"<sos/eos>"`):
The begin of sequence token. Note that for FastSpeech2, it is the same as the `eos_token`.
eos_token (`str`, *optional*, defaults to `"<sos/eos>"`):
The end of sequence token. Note that for FastSpeech2, it is the same as the `bos_token`.
pad_token (`str`, *optional*, defaults to `"<blank>"`):
The token used for padding, for example when batching sequences of different lengths.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
should_strip_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to strip the spaces from the list of tokens.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, bos_token='<sos/eos>', eos_token='<sos/eos>', pad_token='<blank>', unk_token='<unk>', should_strip_spaces=False, **kwargs):
requires_backends(self, 'g2p_en')
with open(vocab_file, encoding='utf-8') as vocab_handle:
self.encoder = json.load(vocab_handle)
import g2p_en
self.g2p = g2p_en.G2p()
self.decoder = {v: k for k, v in self.encoder.items()}
super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, should_strip_spaces=should_strip_spaces, **kwargs)
self.should_strip_spaces = should_strip_spaces
@property
def vocab_size(self):
return len(self.decoder)
def get_vocab(self):
"""Returns vocab as a dict"""
return dict(self.encoder, **self.added_tokens_encoder)
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
text = regex.sub(';', ',', text)
text = regex.sub(':', ',', text)
text = regex.sub('-', ' ', text)
text = regex.sub('&', 'and', text)
text = regex.sub('[\\(\\)\\[\\]\\<\\>\\"]+', '', text)
text = regex.sub('\\s+', ' ', text)
text = text.upper()
return (text, kwargs)
def _tokenize(self, text):
"""Returns a tokenized string."""
tokens = self.g2p(text)
if self.should_strip_spaces:
tokens = list(filter(lambda s: s != ' ', tokens))
tokens.append(self.eos_token)
return tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def decode(self, token_ids, **kwargs):
logger.warning('Phonemes cannot be reliably converted to a string due to the one-many mapping, converting to tokens instead.')
return self.convert_ids_to_tokens(token_ids)
def convert_tokens_to_string(self, tokens, **kwargs):
logger.warning('Phonemes cannot be reliably converted to a string due to the one-many mapping, returning the tokens.')
return tokens
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
"""
Save the vocabulary and special tokens file to a directory.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
Returns:
`Tuple(str)`: Paths to the files saved.
"""
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.get_vocab(), ensure_ascii=False))
return (vocab_file,)
def __getstate__(self):
state = self.__dict__.copy()
state['g2p'] = None
return state
def __setstate__(self, d):
self.__dict__ = d
try:
import g2p_en
self.g2p = g2p_en.G2p()
except ImportError:
raise ImportError('You need to install g2p-en to use FastSpeech2ConformerTokenizer. See https://pypi.org/project/g2p-en/ for installation.')
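`prepare_for_tokenization` normalizes punctuation before g2p conversion. A standalone sketch of the same substitutions using the standard-library `re` module, which behaves identically for these patterns:
```python
import re

def normalize(text):
    # Same rewrite rules as prepare_for_tokenization above.
    text = re.sub(r";", ",", text)
    text = re.sub(r":", ",", text)
    text = re.sub(r"-", " ", text)
    text = re.sub(r"&", "and", text)
    text = re.sub(r'[\(\)\[\]\<\>\"]+', "", text)  # strip brackets and quotes
    text = re.sub(r"\s+", " ", text)               # collapse whitespace
    return text.upper()

print(normalize('Hello - world & "friends":'))  # HELLO WORLD AND FRIENDS,
```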
|
class FastSpeech2ConformerTokenizer(PreTrainedTokenizer):
'''
Construct a FastSpeech2Conformer tokenizer.
Args:
vocab_file (`str`):
Path to the vocabulary file.
bos_token (`str`, *optional*, defaults to `"<sos/eos>"`):
The begin of sequence token. Note that for FastSpeech2, it is the same as the `eos_token`.
eos_token (`str`, *optional*, defaults to `"<sos/eos>"`):
The end of sequence token. Note that for FastSpeech2, it is the same as the `bos_token`.
pad_token (`str`, *optional*, defaults to `"<blank>"`):
The token used for padding, for example when batching sequences of different lengths.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
should_strip_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to strip the spaces from the list of tokens.
'''
def __init__(self, vocab_file, bos_token='<sos/eos>', eos_token='<sos/eos>', pad_token='<blank>', unk_token='<unk>', should_strip_spaces=False, **kwargs):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
'''Returns vocab as a dict'''
pass
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
pass
def _tokenize(self, text):
        '''Returns the text tokenized into a list of phoneme tokens.'''
pass
def _convert_token_to_id(self, token):
        '''Converts a token (str) into an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
        '''Converts an index (integer) into a token (str) using the vocab.'''
pass
def decode(self, token_ids, **kwargs):
pass
def convert_tokens_to_string(self, tokens, **kwargs):
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
'''
Save the vocabulary and special tokens file to a directory.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
Returns:
`Tuple(str)`: Paths to the files saved.
'''
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
| 14
| 6
| 10
| 2
| 7
| 1
| 1
| 0.41
| 1
| 6
| 0
| 0
| 12
| 5
| 12
| 101
| 154
| 33
| 86
| 37
| 61
| 35
| 60
| 25
| 45
| 3
| 3
| 1
| 16
|
2,518
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/configuration_flaubert.py
|
transformers.models.flaubert.configuration_flaubert.FlaubertConfig
|
from ...configuration_utils import PretrainedConfig
class FlaubertConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`FlaubertModel`] or a [`TFFlaubertModel`]. It is
used to instantiate a FlauBERT model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the FlauBERT
[flaubert/flaubert_base_uncased](https://huggingface.co/flaubert/flaubert_base_uncased) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
pre_norm (`bool`, *optional*, defaults to `False`):
Whether to apply the layer normalization before or after the feed forward layer following the attention in
each layer (Vaswani et al., Tensor2Tensor for Neural Machine Translation. 2018)
layerdrop (`float`, *optional*, defaults to 0.0):
Probability to drop layers during training (Fan et al., Reducing Transformer Depth on Demand with
Structured Dropout. ICLR 2020)
vocab_size (`int`, *optional*, defaults to 30145):
Vocabulary size of the FlauBERT model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`FlaubertModel`] or [`TFFlaubertModel`].
emb_dim (`int`, *optional*, defaults to 2048):
Dimensionality of the encoder layers and the pooler layer.
    n_layers (`int`, *optional*, defaults to 12):
        Number of hidden layers in the Transformer encoder.
    n_heads (`int`, *optional*, defaults to 16):
        Number of attention heads for each attention layer in the Transformer encoder.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
        The dropout probability for the attention mechanism.
gelu_activation (`bool`, *optional*, defaults to `True`):
Whether or not to use a *gelu* activation instead of *relu*.
sinusoidal_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to use sinusoidal positional embeddings instead of absolute positional embeddings.
causal (`bool`, *optional*, defaults to `False`):
Whether or not the model should behave in a causal manner. Causal models use a triangular attention mask in
        order to only attend to the left-side context instead of a bidirectional context.
asm (`bool`, *optional*, defaults to `False`):
Whether or not to use an adaptive log softmax projection layer instead of a linear layer for the prediction
layer.
n_langs (`int`, *optional*, defaults to 1):
The number of languages the model handles. Set to 1 for monolingual models.
    use_lang_emb (`bool`, *optional*, defaults to `True`):
Whether to use language embeddings. Some models use additional language embeddings, see [the multilingual
models page](http://huggingface.co/transformers/multilingual.html#xlm-language-embeddings) for information
on how to use them.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
embed_init_std (`float`, *optional*, defaults to 2048^-0.5):
The standard deviation of the truncated_normal_initializer for initializing the embedding matrices.
    init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices except the
embedding matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
bos_index (`int`, *optional*, defaults to 0):
The index of the beginning of sentence token in the vocabulary.
eos_index (`int`, *optional*, defaults to 1):
The index of the end of sentence token in the vocabulary.
pad_index (`int`, *optional*, defaults to 2):
The index of the padding token in the vocabulary.
unk_index (`int`, *optional*, defaults to 3):
The index of the unknown token in the vocabulary.
mask_index (`int`, *optional*, defaults to 5):
The index of the masking token in the vocabulary.
    is_encoder (`bool`, *optional*, defaults to `True`):
Whether or not the initialized model should be a transformer encoder or decoder as seen in Vaswani et al.
summary_type (`string`, *optional*, defaults to "first"):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Has to be one of the following options:
- `"last"`: Take the last token hidden state (like XLNet).
- `"first"`: Take the first token hidden state (like BERT).
- `"mean"`: Take the mean of all tokens hidden states.
- `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- `"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (`bool`, *optional*, defaults to `True`):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Whether or not to add a projection after the vector extraction.
summary_activation (`str`, *optional*):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
Used in the sequence classification and multiple choice models.
Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
summary_first_dropout (`float`, *optional*, defaults to 0.1):
Used in the sequence classification and multiple choice models.
The dropout ratio to be used after the projection and activation.
start_n_top (`int`, *optional*, defaults to 5):
Used in the SQuAD evaluation script.
end_n_top (`int`, *optional*, defaults to 5):
Used in the SQuAD evaluation script.
mask_token_id (`int`, *optional*, defaults to 0):
Model agnostic parameter to identify masked tokens when generating text in an MLM context.
    lang_id (`int`, *optional*, defaults to 0):
The ID of the language used by the model. This parameter is used when generating text in a given language.
"""
model_type = 'flaubert'
attribute_map = {'hidden_size': 'emb_dim', 'num_attention_heads': 'n_heads', 'num_hidden_layers': 'n_layers', 'n_words': 'vocab_size'}
def __init__(self, pre_norm=False, layerdrop=0.0, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048 ** (-0.5), layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type='first', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs):
"""Constructs FlaubertConfig."""
self.pre_norm = pre_norm
self.layerdrop = layerdrop
self.vocab_size = vocab_size
self.emb_dim = emb_dim
self.n_layers = n_layers
self.n_heads = n_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.gelu_activation = gelu_activation
self.sinusoidal_embeddings = sinusoidal_embeddings
self.causal = causal
self.asm = asm
self.n_langs = n_langs
self.use_lang_emb = use_lang_emb
self.layer_norm_eps = layer_norm_eps
self.bos_index = bos_index
self.eos_index = eos_index
self.pad_index = pad_index
self.unk_index = unk_index
self.mask_index = mask_index
self.is_encoder = is_encoder
self.max_position_embeddings = max_position_embeddings
self.embed_init_std = embed_init_std
self.init_std = init_std
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_proj_to_labels = summary_proj_to_labels
self.summary_first_dropout = summary_first_dropout
self.start_n_top = start_n_top
self.end_n_top = end_n_top
self.mask_token_id = mask_token_id
self.lang_id = lang_id
if 'n_words' in kwargs:
self.n_words = kwargs['n_words']
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
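The `attribute_map` above lets canonical config names resolve to Flaubert-specific fields, so downstream code can read `config.hidden_size` even though the stored attribute is `emb_dim`. A short sketch (assumes `transformers` is installed):
```python
from transformers import FlaubertConfig

config = FlaubertConfig(emb_dim=512, n_heads=8, n_layers=6)
print(config.hidden_size)          # 512, resolved through attribute_map to emb_dim
print(config.num_attention_heads)  # 8, resolved to n_heads
print(config.num_hidden_layers)    # 6, resolved to n_layers
```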
|
class FlaubertConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`FlaubertModel`] or a [`TFFlaubertModel`]. It is
used to instantiate a FlauBERT model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the FlauBERT
[flaubert/flaubert_base_uncased](https://huggingface.co/flaubert/flaubert_base_uncased) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
pre_norm (`bool`, *optional*, defaults to `False`):
Whether to apply the layer normalization before or after the feed forward layer following the attention in
each layer (Vaswani et al., Tensor2Tensor for Neural Machine Translation. 2018)
layerdrop (`float`, *optional*, defaults to 0.0):
Probability to drop layers during training (Fan et al., Reducing Transformer Depth on Demand with
Structured Dropout. ICLR 2020)
vocab_size (`int`, *optional*, defaults to 30145):
Vocabulary size of the FlauBERT model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`FlaubertModel`] or [`TFFlaubertModel`].
emb_dim (`int`, *optional*, defaults to 2048):
Dimensionality of the encoder layers and the pooler layer.
    n_layers (`int`, *optional*, defaults to 12):
        Number of hidden layers in the Transformer encoder.
    n_heads (`int`, *optional*, defaults to 16):
        Number of attention heads for each attention layer in the Transformer encoder.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
        The dropout probability for the attention mechanism.
gelu_activation (`bool`, *optional*, defaults to `True`):
Whether or not to use a *gelu* activation instead of *relu*.
sinusoidal_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to use sinusoidal positional embeddings instead of absolute positional embeddings.
causal (`bool`, *optional*, defaults to `False`):
Whether or not the model should behave in a causal manner. Causal models use a triangular attention mask in
        order to only attend to the left-side context instead of a bidirectional context.
asm (`bool`, *optional*, defaults to `False`):
Whether or not to use an adaptive log softmax projection layer instead of a linear layer for the prediction
layer.
n_langs (`int`, *optional*, defaults to 1):
The number of languages the model handles. Set to 1 for monolingual models.
    use_lang_emb (`bool`, *optional*, defaults to `True`):
Whether to use language embeddings. Some models use additional language embeddings, see [the multilingual
models page](http://huggingface.co/transformers/multilingual.html#xlm-language-embeddings) for information
on how to use them.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
embed_init_std (`float`, *optional*, defaults to 2048^-0.5):
The standard deviation of the truncated_normal_initializer for initializing the embedding matrices.
    init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices except the
embedding matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
bos_index (`int`, *optional*, defaults to 0):
The index of the beginning of sentence token in the vocabulary.
eos_index (`int`, *optional*, defaults to 1):
The index of the end of sentence token in the vocabulary.
pad_index (`int`, *optional*, defaults to 2):
The index of the padding token in the vocabulary.
unk_index (`int`, *optional*, defaults to 3):
The index of the unknown token in the vocabulary.
mask_index (`int`, *optional*, defaults to 5):
The index of the masking token in the vocabulary.
    is_encoder (`bool`, *optional*, defaults to `True`):
Whether or not the initialized model should be a transformer encoder or decoder as seen in Vaswani et al.
summary_type (`string`, *optional*, defaults to "first"):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Has to be one of the following options:
- `"last"`: Take the last token hidden state (like XLNet).
- `"first"`: Take the first token hidden state (like BERT).
- `"mean"`: Take the mean of all tokens hidden states.
- `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- `"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (`bool`, *optional*, defaults to `True`):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Whether or not to add a projection after the vector extraction.
summary_activation (`str`, *optional*):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
Used in the sequence classification and multiple choice models.
Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
summary_first_dropout (`float`, *optional*, defaults to 0.1):
Used in the sequence classification and multiple choice models.
The dropout ratio to be used after the projection and activation.
start_n_top (`int`, *optional*, defaults to 5):
Used in the SQuAD evaluation script.
end_n_top (`int`, *optional*, defaults to 5):
Used in the SQuAD evaluation script.
mask_token_id (`int`, *optional*, defaults to 0):
Model agnostic parameter to identify masked tokens when generating text in an MLM context.
    lang_id (`int`, *optional*, defaults to 0):
The ID of the language used by the model. This parameter is used when generating text in a given language.
'''
def __init__(self, pre_norm=False, layerdrop=0.0, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048 ** (-0.5), layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type='first', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs):
'''Constructs FlaubertConfig.'''
pass
| 2
| 2
| 78
| 2
| 75
| 1
| 2
| 1.16
| 1
| 1
| 0
| 0
| 1
| 34
| 1
| 1
| 190
| 12
| 83
| 76
| 43
| 96
| 40
| 38
| 38
| 2
| 1
| 1
| 2
|
2,519
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/configuration_flaubert.py
|
transformers.models.flaubert.configuration_flaubert.FlaubertOnnxConfig
|
from collections.abc import Mapping
from ...onnx import OnnxConfig
from collections import OrderedDict
class FlaubertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'multiple-choice':
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
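Each entry in the returned mapping names the tensor dimensions that stay dynamic in the exported ONNX graph. A minimal sketch of the same branching (the `task` value is an assumption):
```python
from collections import OrderedDict

task = "sequence-classification"  # hypothetical task for illustration
if task == "multiple-choice":
    dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
    dynamic_axis = {0: "batch", 1: "sequence"}

inputs = OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
print(inputs)  # both inputs share the same dynamic-axis annotation
```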
|
class FlaubertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
| 3
| 0
| 11
| 0
| 11
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 1
| 0
| 1
| 1
| 13
| 0
| 13
| 4
| 10
| 0
| 6
| 3
| 4
| 2
| 1
| 1
| 2
|
2,520
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/modeling_flaubert.py
|
transformers.models.flaubert.modeling_flaubert.FlaubertForMultipleChoice
|
import torch
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Callable, Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
@auto_docstring
class FlaubertForMultipleChoice(FlaubertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = FlaubertModel(config)
self.sequence_summary = FlaubertSequenceSummary(config)
self.logits_proj = nn.Linear(config.num_labels, 1)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
langs (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
langs = langs.view(-1, langs.size(-1)) if langs is not None else None
inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
if lengths is not None:
logger.warning('The `lengths` parameter cannot be used with the Flaubert multiple choice models. Please use the attention mask instead.')
lengths = None
transformer_outputs = self.transformer(input_ids=input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
output = transformer_outputs[0]
logits = self.sequence_summary(output)
logits = self.logits_proj(logits)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
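The choice dimension is folded into the batch before the encoder runs, then unfolded when the per-choice logits are reshaped for the loss. A sketch of just that tensor plumbing, with assumed sizes and a random stand-in for the projected logits:
```python
import torch

batch_size, num_choices, seq_len = 2, 4, 16
input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))

flat_ids = input_ids.view(-1, input_ids.size(-1))      # (batch*choices, seq_len)
per_choice_logits = torch.randn(flat_ids.size(0), 1)   # stand-in for logits_proj output
reshaped_logits = per_choice_logits.view(-1, num_choices)  # (batch, choices)
print(reshaped_logits.shape)  # torch.Size([2, 4]), ready for CrossEntropyLoss
```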
|
@auto_docstring
class FlaubertForMultipleChoice(FlaubertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
langs (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
'''
pass
| 5
| 1
| 43
| 4
| 36
| 4
| 7
| 0.09
| 1
| 6
| 2
| 0
| 2
| 3
| 2
| 5
| 96
| 9
| 80
| 31
| 54
| 7
| 32
| 13
| 29
| 13
| 2
| 1
| 14
|
2,521
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/modeling_flaubert.py
|
transformers.models.flaubert.modeling_flaubert.FlaubertForQuestionAnswering
|
import torch
from typing import Callable, Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
@auto_docstring
class FlaubertForQuestionAnswering(FlaubertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = FlaubertModel(config)
self.qa_outputs = FlaubertSQuADHead(config)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, is_impossible: Optional[torch.Tensor]=None, cls_index: Optional[torch.Tensor]=None, p_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, FlaubertForQuestionAnsweringOutput]:
"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels whether a question has an answer or no answer (SQuAD 2.0).
cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the classification token to use as input for computing plausibility of the
answer.
p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be
            masked. 0.0 means the token is not masked.
Example:
```python
>>> from transformers import AutoTokenizer, FlaubertForQuestionAnswering
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
>>> model = FlaubertForQuestionAnswering.from_pretrained("FacebookAI/xlm-mlm-en-2048")
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
... 0
... ) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
output = transformer_outputs[0]
outputs = self.qa_outputs(output, start_positions=start_positions, end_positions=end_positions, cls_index=cls_index, is_impossible=is_impossible, p_mask=p_mask, return_dict=return_dict)
if not return_dict:
return outputs + transformer_outputs[1:]
return FlaubertForQuestionAnsweringOutput(loss=outputs.loss, start_top_log_probs=outputs.start_top_log_probs, start_top_index=outputs.start_top_index, end_top_log_probs=outputs.end_top_log_probs, end_top_index=outputs.end_top_index, cls_logits=outputs.cls_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
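Decoding the beam-search head's outputs requires knowing their layout. A hedged sketch under the assumption (taken from the XLNet-style SQuAD head this mirrors) that `start_top_index` has shape `(batch, start_n_top)` and `end_top_index` has shape `(batch, start_n_top * end_n_top)`, with each start candidate owning a contiguous block of end candidates:
```python
import torch

start_n_top, end_n_top = 5, 5  # assumed config defaults
start_top_index = torch.randint(0, 128, (1, start_n_top))
end_top_index = torch.randint(0, 128, (1, start_n_top * end_n_top))

# Candidates are assumed sorted by log-probability, so index 0 is the best
# start and its first end candidate is the best matching end.
best_start = start_top_index[0, 0].item()
best_end = end_top_index[0, 0].item()
print((best_start, best_end))
```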
|
@auto_docstring
class FlaubertForQuestionAnswering(FlaubertPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, is_impossible: Optional[torch.Tensor]=None, cls_index: Optional[torch.Tensor]=None, p_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, FlaubertForQuestionAnsweringOutput]:
'''
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels whether a question has an answer or no answer (SQuAD 2.0).
cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the classification token to use as input for computing plausibility of the
answer.
p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be
            masked. 0.0 means the token is not masked.
Example:
```python
>>> from transformers import AutoTokenizer, FlaubertForQuestionAnswering
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
>>> model = FlaubertForQuestionAnswering.from_pretrained("FacebookAI/xlm-mlm-en-2048")
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
... 0
... ) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
```'''
pass
| 5
| 1
| 54
| 7
| 31
| 17
| 2
| 0.51
| 1
| 6
| 2
| 0
| 2
| 2
| 2
| 5
| 112
| 14
| 65
| 28
| 41
| 33
| 14
| 8
| 11
| 3
| 2
| 1
| 4
|
2,522
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/modeling_flaubert.py
|
transformers.models.flaubert.modeling_flaubert.FlaubertForQuestionAnsweringSimple
|
import torch
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Callable, Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
@auto_docstring(custom_intro='\n Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ')
class FlaubertForQuestionAnsweringSimple(FlaubertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = FlaubertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = transformer_outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + transformer_outputs[1:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
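A minimal usage sketch for the simple QA head above (illustrative, not part of the original file). The checkpoint name is an assumption; the span head is randomly initialised unless the checkpoint was fine-tuned for extractive QA.
```python
import torch
from transformers import AutoTokenizer, FlaubertForQuestionAnsweringSimple

tokenizer = AutoTokenizer.from_pretrained("flaubert/flaubert_base_cased")  # assumed checkpoint
model = FlaubertForQuestionAnsweringSimple.from_pretrained("flaubert/flaubert_base_cased")

question = "Qui est le président ?"
context = "Le président de la République est élu pour cinq ans."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Most likely start/end token positions, decoded back to text.
start = outputs.start_logits.argmax(dim=-1).item()
end = outputs.end_logits.argmax(dim=-1).item()
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```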
|
@auto_docstring(custom_intro='\n Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n ')
class FlaubertForQuestionAnsweringSimple(FlaubertPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
'''
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
'''
pass
| 5
| 1
| 44
| 5
| 33
| 7
| 4
| 0.18
| 1
| 6
| 2
| 0
| 2
| 2
| 2
| 5
| 95
| 10
| 72
| 32
| 47
| 13
| 31
| 15
| 28
| 7
| 2
| 2
| 8
|
2,523
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/modeling_flaubert.py
|
transformers.models.flaubert.modeling_flaubert.FlaubertForSequenceClassification
|
import torch
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Callable, Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@auto_docstring(custom_intro='\n Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n e.g. for GLUE tasks.\n ')
class FlaubertForSequenceClassification(FlaubertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.transformer = FlaubertModel(config)
self.sequence_summary = FlaubertSequenceSummary(config)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]:
"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
output = transformer_outputs[0]
logits = self.sequence_summary(output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
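A short usage sketch for the sequence-classification head above (illustrative, not part of the original file); the checkpoint name and `num_labels=2` are assumptions, and passing integer labels triggers the single-label CrossEntropyLoss branch shown in `forward`.
```python
import torch
from transformers import AutoTokenizer, FlaubertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("flaubert/flaubert_base_cased")  # assumed checkpoint
model = FlaubertForSequenceClassification.from_pretrained("flaubert/flaubert_base_cased", num_labels=2)

inputs = tokenizer("Ce film était excellent.", return_tensors="pt")
labels = torch.tensor([1])          # integer labels -> single_label_classification branch

outputs = model(**inputs, labels=labels)
print(outputs.loss, outputs.logits.argmax(dim=-1))
```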
|
@auto_docstring(custom_intro='\n Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n e.g. for GLUE tasks.\n ')
class FlaubertForSequenceClassification(FlaubertPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]:
'''
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5
| 1
| 43
| 4
| 35
| 4
| 7
| 0.09
| 1
| 7
| 2
| 0
| 2
| 4
| 2
| 5
| 93
| 9
| 77
| 28
| 53
| 7
| 33
| 12
| 30
| 12
| 2
| 3
| 13
|
2,524
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/modeling_flaubert.py
|
transformers.models.flaubert.modeling_flaubert.FlaubertForTokenClassification
|
import torch
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Callable, Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
@auto_docstring
class FlaubertForTokenClassification(FlaubertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = FlaubertModel(config)
self.dropout = nn.Dropout(config.dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
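An illustrative usage sketch for the token-classification head above (not part of the original file); the checkpoint name and label count are assumptions.
```python
import torch
from transformers import AutoTokenizer, FlaubertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("flaubert/flaubert_base_cased")  # assumed checkpoint
model = FlaubertForTokenClassification.from_pretrained("flaubert/flaubert_base_cased", num_labels=5)

inputs = tokenizer("Jean habite à Paris.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits          # (batch, seq_len, num_labels)

predictions = logits.argmax(dim=-1)          # one predicted label id per (sub)token
```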
|
@auto_docstring
class FlaubertForTokenClassification(FlaubertPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
'''
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass
| 5
| 1
| 34
| 4
| 27
| 3
| 3
| 0.08
| 1
| 6
| 2
| 0
| 2
| 4
| 2
| 5
| 75
| 9
| 61
| 29
| 37
| 5
| 22
| 13
| 19
| 5
| 2
| 1
| 6
|
2,525
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/modeling_flaubert.py
|
transformers.models.flaubert.modeling_flaubert.FlaubertModel
|
from ...cache_utils import DynamicCache, EncoderDecoderCache
import torch
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Callable, Optional, Union
from torch import nn
@auto_docstring
class FlaubertModel(FlaubertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.is_encoder = config.is_encoder
self.is_decoder = not config.is_encoder
if self.is_decoder:
raise NotImplementedError('Currently Flaubert can only be used as an encoder')
self.causal = config.causal
self.n_langs = config.n_langs
self.use_lang_emb = config.use_lang_emb
self.n_words = config.n_words
self.eos_index = config.eos_index
self.pad_index = config.pad_index
self.dim = config.emb_dim
self.hidden_dim = self.dim * 4
self.n_heads = config.n_heads
self.n_layers = config.n_layers
self.dropout = config.dropout
self.attention_dropout = config.attention_dropout
assert self.dim % self.n_heads == 0, 'transformer dim must be a multiple of n_heads'
self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim)
if config.n_langs > 1 and config.use_lang_emb:
self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)
self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)
self.attentions = nn.ModuleList()
self.layer_norm1 = nn.ModuleList()
self.ffns = nn.ModuleList()
self.layer_norm2 = nn.ModuleList()
for i in range(self.n_layers):
self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config, layer_idx=i))
self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))
self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
if hasattr(config, 'pruned_heads'):
pruned_heads = config.pruned_heads.copy().items()
config.pruned_heads = {}
for layer, heads in pruned_heads:
if self.attentions[int(layer)].n_heads == config.n_heads:
self.prune_heads({int(layer): list(map(int, heads))})
self.post_init()
self.layerdrop = getattr(config, 'layerdrop', 0.0)
self.pre_norm = getattr(config, 'pre_norm', False)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.attentions[layer].prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, lengths: Optional[torch.LongTensor]=None, cache: Optional[dict[str, torch.FloatTensor]]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutput]:
"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use `attention_mask` for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`:
cache (`dict[str, torch.FloatTensor]`, *optional*):
Dictionary strings to `torch.FloatTensor` that contains precomputed hidden-states (key and values in the
attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
decoding. The dictionary object will be modified in-place during the forward pass to add newly computed
hidden-states.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None:
bs, slen = input_ids.size()
else:
bs, slen = inputs_embeds.size()[:-1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if cache is None:
cache = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if isinstance(cache, tuple):
cache = EncoderDecoderCache.from_legacy_cache(cache)
if lengths is None:
if input_ids is not None:
lengths = (input_ids != self.pad_index).sum(dim=1).long()
else:
lengths = torch.tensor([slen] * bs, device=device)
assert lengths.size(0) == bs
assert lengths.max().item() <= slen
mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
if position_ids is None:
if hasattr(self, 'position_ids'):
position_ids = self.position_ids[:, :slen]
position_ids = position_ids.expand((bs, slen))
else:
position_ids = torch.arange(slen, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand((bs, slen))
else:
assert position_ids.size() == (bs, slen)
if langs is not None:
assert langs.size() == (bs, slen)
head_mask = self.get_head_mask(head_mask, self.config.n_layers)
if cache is not None and input_ids is not None:
_slen = slen - cache.get_seq_length()
input_ids = input_ids[:, -_slen:]
position_ids = position_ids[:, -_slen:]
if langs is not None:
langs = langs[:, -_slen:]
mask = mask[:, -_slen:]
attn_mask = attn_mask[:, -_slen:]
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids)
tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)
if langs is not None and self.use_lang_emb and (self.config.n_langs > 1):
tensor = tensor + self.lang_embeddings(langs)
if token_type_ids is not None:
tensor = tensor + self.embeddings(token_type_ids)
tensor = self.layer_norm_emb(tensor)
tensor = nn.functional.dropout(tensor, p=self.dropout, training=self.training)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
hidden_states = () if output_hidden_states else None
attentions = () if output_attentions else None
for i in range(self.n_layers):
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
if output_hidden_states:
hidden_states = hidden_states + (tensor,)
if not self.pre_norm:
attn_outputs = self.attentions[i](tensor, attn_mask, cache=cache, head_mask=head_mask[i], output_attentions=output_attentions, cache_position=cache_position)
attn = attn_outputs[0]
if output_attentions:
attentions = attentions + (attn_outputs[1],)
attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
tensor = tensor + attn
tensor = self.layer_norm1[i](tensor)
else:
tensor_normalized = self.layer_norm1[i](tensor)
attn_outputs = self.attentions[i](tensor_normalized, attn_mask, cache=cache, head_mask=head_mask[i])
attn = attn_outputs[0]
if output_attentions:
attentions = attentions + (attn_outputs[1],)
attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
tensor = tensor + attn
if not self.pre_norm:
tensor = tensor + self.ffns[i](tensor)
tensor = self.layer_norm2[i](tensor)
else:
tensor_normalized = self.layer_norm2[i](tensor)
tensor = tensor + self.ffns[i](tensor_normalized)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
if output_hidden_states:
hidden_states = hidden_states + (tensor,)
if not return_dict:
return tuple((v for v in [tensor, hidden_states, attentions] if v is not None))
return BaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)
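A minimal sketch of calling the bare encoder above, including how a `langs` tensor can be built from `config.lang2id` as described in the docstring (illustrative, not part of the original file; the checkpoint name is an assumption and monolingual Flaubert checkpoints may not define `lang2id`).
```python
import torch
from transformers import AutoTokenizer, FlaubertModel

tokenizer = AutoTokenizer.from_pretrained("flaubert/flaubert_base_cased")  # assumed checkpoint
model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")

inputs = tokenizer("Le chat dort sur le canapé.", return_tensors="pt")

# `langs` only matters for multilingual checkpoints that expose lang2id and use_lang_emb.
langs = None
if getattr(model.config, "lang2id", None) and model.config.use_lang_emb:
    lang_id = model.config.lang2id.get("fr")
    if lang_id is not None:
        langs = torch.full_like(inputs["input_ids"], lang_id)

with torch.no_grad():
    hidden = model(**inputs, langs=langs).last_hidden_state   # (batch, seq_len, emb_dim)
```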
|
@auto_docstring
class FlaubertModel(FlaubertPreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, lengths: Optional[torch.LongTensor]=None, cache: Optional[dict[str, torch.FloatTensor]]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutput]:
'''
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use `attention_mask` for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`:
cache (`dict[str, torch.FloatTensor]`, *optional*):
Dictionary strings to `torch.FloatTensor` that contains precomputed hidden-states (key and values in the
attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
decoding. The dictionary object will be modified in-place during the forward pass to add newly computed
hidden-states.
'''
pass
| 8
| 2
| 49
| 6
| 32
| 12
| 8
| 0.39
| 1
| 13
| 3
| 0
| 5
| 24
| 5
| 8
| 261
| 35
| 167
| 61
| 141
| 65
| 131
| 46
| 125
| 29
| 2
| 3
| 40
|
2,526
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/modeling_flaubert.py
|
transformers.models.flaubert.modeling_flaubert.FlaubertPreTrainedModel
|
from torch import nn
import torch
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_utils import PreTrainedModel
from .configuration_flaubert import FlaubertConfig
@auto_docstring
class FlaubertPreTrainedModel(PreTrainedModel):
config: FlaubertConfig
base_model_prefix = 'transformer'
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
@property
def dummy_inputs(self):
inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
if self.config.use_lang_emb and self.config.n_langs > 1:
langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
else:
langs_list = None
return {'input_ids': inputs_list, 'attention_mask': attns_list, 'langs': langs_list}
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Embedding):
if self.config is not None and self.config.embed_init_std is not None:
nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, nn.Linear):
if self.config is not None and self.config.init_std is not None:
nn.init.normal_(module.weight, mean=0, std=self.config.init_std)
if module.bias is not None:
nn.init.constant_(module.bias, 0.0)
if isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, FlaubertModel) and self.config.sinusoidal_embeddings:
create_sinusoidal_embeddings(self.config.max_position_embeddings, self.config.emb_dim, out=module.position_embeddings.weight)
|
@auto_docstring
class FlaubertPreTrainedModel(PreTrainedModel):
def __init__(self, *inputs, **kwargs):
pass
@property
def dummy_inputs(self):
pass
def _init_weights(self, module):
'''Initialize the weights.'''
pass
| 6
| 1
| 10
| 0
| 9
| 0
| 4
| 0.15
| 1
| 2
| 1
| 7
| 3
| 0
| 3
| 3
| 42
| 4
| 33
| 11
| 28
| 5
| 29
| 10
| 25
| 9
| 1
| 3
| 12
|
2,527
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/modeling_flaubert.py
|
transformers.models.flaubert.modeling_flaubert.FlaubertPredLayer
|
from ...utils import ModelOutput, auto_docstring, logging
from torch import nn
@auto_docstring(custom_intro='\n The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.\n ')
class FlaubertPredLayer(nn.Module):
"""
Prediction layer (cross_entropy or adaptive_softmax).
"""
def __init__(self, config):
super().__init__()
self.asm = config.asm
self.n_words = config.n_words
self.pad_index = config.pad_index
dim = config.emb_dim
if config.asm is False:
self.proj = nn.Linear(dim, config.n_words, bias=True)
else:
self.proj = nn.AdaptiveLogSoftmaxWithLoss(in_features=dim, n_classes=config.n_words, cutoffs=config.asm_cutoffs, div_value=config.asm_div_value, head_bias=True)
def forward(self, x, y=None):
"""Compute the loss, and optionally the scores."""
outputs = ()
if self.asm is False:
scores = self.proj(x)
outputs = (scores,) + outputs
if y is not None:
loss = nn.functional.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction='mean')
outputs = (loss,) + outputs
else:
scores = self.proj.log_prob(x)
outputs = (scores,) + outputs
if y is not None:
_, loss = self.proj(x, y)
outputs = (loss,) + outputs
return outputs
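A self-contained sketch of the prediction layer above on the `asm=False` path (illustrative, not part of the original file). It assumes `FlaubertPredLayer` is importable from `transformers.models.flaubert.modeling_flaubert` and uses a hypothetical minimal config object.
```python
import torch
from types import SimpleNamespace
from transformers.models.flaubert.modeling_flaubert import FlaubertPredLayer

# Hypothetical minimal config carrying only the attributes read in __init__ (asm=False path).
cfg = SimpleNamespace(asm=False, n_words=100, pad_index=2, emb_dim=32)
pred_layer = FlaubertPredLayer(cfg)

hidden = torch.randn(6, cfg.emb_dim)            # hidden states for 6 token positions
targets = torch.randint(0, cfg.n_words, (6,))

loss, scores = pred_layer(hidden, targets)       # (loss, scores) when labels are given
(scores_only,) = pred_layer(hidden)              # (scores,) when labels are omitted
```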
|
@auto_docstring(custom_intro='\n The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.\n ')
class FlaubertPredLayer(nn.Module):
'''
Prediction layer (cross_entropy or adaptive_softmax).
'''
def __init__(self, config):
pass
def forward(self, x, y=None):
'''Compute the loss, and optionally the scores.'''
pass
| 4
| 2
| 17
| 1
| 16
| 1
| 3
| 0.16
| 1
| 1
| 0
| 0
| 2
| 4
| 2
| 12
| 40
| 4
| 32
| 12
| 29
| 5
| 24
| 12
| 21
| 4
| 1
| 2
| 6
|
2,528
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/modeling_flaubert.py
|
transformers.models.flaubert.modeling_flaubert.FlaubertWithLMHeadModel
|
from ...generation import GenerationMixin
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import ModelOutput, auto_docstring, logging
import torch
@auto_docstring(custom_intro='\n The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class FlaubertWithLMHeadModel(FlaubertPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['pred_layer.proj.weight']
def __init__(self, config):
super().__init__(config)
self.transformer = FlaubertModel(config)
self.pred_layer = FlaubertPredLayer(config)
self.post_init()
def get_output_embeddings(self):
return self.pred_layer.proj
def set_output_embeddings(self, new_embeddings):
self.pred_layer.proj = new_embeddings
def prepare_inputs_for_generation(self, input_ids, **kwargs):
mask_token_id = self.config.mask_token_id
lang_id = self.config.lang_id
effective_batch_size = input_ids.shape[0]
mask_token = torch.full((effective_batch_size, 1), mask_token_id, dtype=torch.long, device=input_ids.device)
input_ids = torch.cat([input_ids, mask_token], dim=1)
if lang_id is not None:
langs = torch.full_like(input_ids, lang_id)
else:
langs = None
return {'input_ids': input_ids, 'langs': langs}
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use `attention_mask` for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`:
cache (`dict[str, torch.FloatTensor]`, *optional*):
Dictionary strings to `torch.FloatTensor` that contains precomputed hidden-states (key and values in the
attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
decoding. The dictionary object will be modified in-place during the forward pass to add newly computed
hidden-states.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
output = transformer_outputs[0]
outputs = self.pred_layer(output, labels)
if not return_dict:
return outputs + transformer_outputs[1:]
return MaskedLMOutput(loss=outputs[0] if labels is not None else None, logits=outputs[0] if labels is None else outputs[1], hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
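An illustrative mask-filling sketch for the LM head above (not part of the original file); the checkpoint name is an assumption, and the mask token relies on the tokenizer default `<special1>`.
```python
import torch
from transformers import AutoTokenizer, FlaubertWithLMHeadModel

tokenizer = AutoTokenizer.from_pretrained("flaubert/flaubert_base_cased")  # assumed checkpoint
model = FlaubertWithLMHeadModel.from_pretrained("flaubert/flaubert_base_cased")

text = f"Le camembert est {tokenizer.mask_token} !"   # mask token defaults to "<special1>"
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits                    # (batch, seq_len, vocab_size)

mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
print(tokenizer.decode(logits[0, mask_pos].argmax(dim=-1)))
```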
|
@auto_docstring(custom_intro='\n The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class FlaubertWithLMHeadModel(FlaubertPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
def prepare_inputs_for_generation(self, input_ids, **kwargs):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, langs: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, lengths: Optional[torch.Tensor]=None, cache: Optional[dict[str, torch.Tensor]]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
'''
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use `attention_mask` for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`:
cache (`dict[str, torch.FloatTensor]`, *optional*):
Dictionary strings to `torch.FloatTensor` that contains precomputed hidden-states (key and values in the
attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
decoding. The dictionary object will be modified in-place during the forward pass to add newly computed
hidden-states.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
'''
pass
| 8
| 1
| 15
| 1
| 12
| 2
| 2
| 0.13
| 2
| 7
| 3
| 0
| 5
| 2
| 5
| 8
| 90
| 12
| 70
| 33
| 42
| 9
| 29
| 17
| 23
| 5
| 2
| 1
| 10
|
2,529
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/modeling_flaubert.py
|
transformers.models.flaubert.modeling_flaubert.MultiHeadAttention
|
import math
from ...cache_utils import DynamicCache, EncoderDecoderCache
import torch
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from torch import nn
class MultiHeadAttention(nn.Module):
def __init__(self, n_heads, dim, config, layer_idx: int=0):
super().__init__()
self.layer_id = layer_idx
self.dim = dim
self.n_heads = n_heads
self.head_dim = dim // n_heads
self.dropout = config.attention_dropout
assert self.dim % self.n_heads == 0
self.q_lin = nn.Linear(dim, dim)
self.k_lin = nn.Linear(dim, dim)
self.v_lin = nn.Linear(dim, dim)
self.out_lin = nn.Linear(dim, dim)
self.pruned_heads = set()
def prune_heads(self, heads):
attention_head_size = self.dim // self.n_heads
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, attention_head_size, self.pruned_heads)
self.q_lin = prune_linear_layer(self.q_lin, index)
self.k_lin = prune_linear_layer(self.k_lin, index)
self.v_lin = prune_linear_layer(self.v_lin, index)
self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
self.n_heads = self.n_heads - len(heads)
self.dim = attention_head_size * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input, mask, kv=None, cache=None, head_mask=None, output_attentions=False, cache_position=None):
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
"""
bs, qlen, dim = input.size()
is_cross_attention = kv is not None
mask_reshape = (bs, 1, qlen, -1) if mask.dim() == 3 else (bs, 1, 1, -1)
q = self.q_lin(input).view(bs, -1, self.n_heads, self.head_dim).transpose(1, 2)
if cache is not None:
if isinstance(cache, EncoderDecoderCache):
is_updated = cache.is_updated.get(self.layer_id)
if is_cross_attention:
curr_past_key_value = cache.cross_attention_cache
else:
curr_past_key_value = cache.self_attention_cache
else:
curr_past_key_value = cache
current_states = kv if is_cross_attention else input
if is_cross_attention and cache is not None and is_updated:
k = curr_past_key_value.key_cache[self.layer_id]
v = curr_past_key_value.value_cache[self.layer_id]
else:
k = self.k_lin(current_states)
v = self.v_lin(current_states)
k = k.view(bs, -1, self.n_heads, self.head_dim).transpose(1, 2)
v = v.view(bs, -1, self.n_heads, self.head_dim).transpose(1, 2)
if cache is not None:
cache_position = cache_position if not is_cross_attention else None
k, v = curr_past_key_value.update(k, v, self.layer_id, {'cache_position': cache_position})
if is_cross_attention:
cache.is_updated[self.layer_id] = True
q = q / math.sqrt(self.head_dim)
scores = torch.matmul(q, k.transpose(2, 3))
mask = (mask == 0).view(mask_reshape).expand_as(scores)
scores.masked_fill_(mask, torch.finfo(scores.dtype).min)
weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
weights = nn.functional.dropout(weights, p=self.dropout, training=self.training)
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v)
context = context.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * self.head_dim)
outputs = (self.out_lin(context),)
if output_attentions:
outputs = outputs + (weights,)
return outputs
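A standalone shape check for the attention module above (illustrative, not part of the original file). It assumes `MultiHeadAttention` is importable from `transformers.models.flaubert.modeling_flaubert` and that only `attention_dropout` is needed from the config, as in `__init__` above.
```python
import torch
from types import SimpleNamespace
from transformers.models.flaubert.modeling_flaubert import MultiHeadAttention

# Hypothetical minimal config; __init__ above only reads `attention_dropout` from it.
cfg = SimpleNamespace(attention_dropout=0.0)
attn = MultiHeadAttention(n_heads=4, dim=64, config=cfg)

hidden = torch.randn(2, 5, 64)     # (batch, seq_len, dim)
mask = torch.ones(2, 5)            # 1 = real token, 0 = padded position
(context,) = attn(hidden, mask)    # no cache / head_mask -> single-element tuple
print(context.shape)               # torch.Size([2, 5, 64])
```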
|
class MultiHeadAttention(nn.Module):
def __init__(self, n_heads, dim, config, layer_idx: int=0):
pass
def prune_heads(self, heads):
pass
def forward(self, input, mask, kv=None, cache=None, head_mask=None, output_attentions=False, cache_position=None):
'''
Self-attention (if kv is None) or attention over source sentence (provided by kv).
'''
pass
| 4
| 1
| 19
| 2
| 14
| 6
| 3
| 0.37
| 1
| 2
| 0
| 0
| 3
| 9
| 3
| 13
| 94
| 13
| 70
| 31
| 64
| 26
| 67
| 31
| 61
| 11
| 1
| 3
| 16
|
2,530
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/modeling_flaubert.py
|
transformers.models.flaubert.modeling_flaubert.TransformerFFN
|
from torch import nn
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...activations import gelu, get_activation
class TransformerFFN(nn.Module):
def __init__(self, in_dim, dim_hidden, out_dim, config):
super().__init__()
self.dropout = config.dropout
self.lin1 = nn.Linear(in_dim, dim_hidden)
self.lin2 = nn.Linear(dim_hidden, out_dim)
self.act = gelu if config.gelu_activation else nn.functional.relu
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
def forward(self, input):
return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
def ff_chunk(self, input):
x = self.lin1(input)
x = self.act(x)
x = self.lin2(x)
x = nn.functional.dropout(x, p=self.dropout, training=self.training)
return x
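A matching sketch for the feed-forward block above (illustrative, not part of the original file); with `chunk_size_feed_forward=0`, `apply_chunking_to_forward` simply calls `ff_chunk` on the whole input.
```python
import torch
from types import SimpleNamespace
from transformers.models.flaubert.modeling_flaubert import TransformerFFN

# Hypothetical minimal config; chunk_size_feed_forward=0 means no chunking.
cfg = SimpleNamespace(dropout=0.0, gelu_activation=True, chunk_size_feed_forward=0)
ffn = TransformerFFN(in_dim=64, dim_hidden=256, out_dim=64, config=cfg)

out = ffn(torch.randn(2, 5, 64))   # output keeps the input shape: (2, 5, 64)
```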
|
class TransformerFFN(nn.Module):
def __init__(self, in_dim, dim_hidden, out_dim, config):
pass
def forward(self, input):
pass
def ff_chunk(self, input):
pass
| 4
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 3
| 6
| 3
| 13
| 19
| 2
| 17
| 11
| 13
| 0
| 17
| 11
| 13
| 2
| 1
| 0
| 4
|
2,531
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flaubert/tokenization_flaubert.py
|
transformers.models.flaubert.tokenization_flaubert.FlaubertTokenizer
|
from ...tokenization_utils import PreTrainedTokenizer
from typing import Optional
import unicodedata
import os
import json
class FlaubertTokenizer(PreTrainedTokenizer):
"""
Construct a Flaubert tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:
- Moses preprocessing and tokenization.
- Normalizing all inputs text.
- The arguments `special_tokens` and the function `set_special_tokens` can be used to add additional symbols (like
"__classify__") to a vocabulary.
- The argument `do_lowercase` controls lower casing (automatically set for pretrained vocabularies).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Vocabulary file.
merges_file (`str`):
Merges file.
do_lowercase (`bool`, *optional*, defaults to `False`):
Controls lower casing.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"</s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"<special1>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`List[str]`, *optional*, defaults to `['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']`):
List of additional special tokens.
lang2id (`Dict[str, int]`, *optional*):
Dictionary mapping languages string identifiers to their IDs.
id2lang (`Dict[int, str]`, *optional*):
Dictionary mapping language IDs to their string identifiers.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(self, vocab_file, merges_file, do_lowercase=False, unk_token='<unk>', bos_token='<s>', sep_token='</s>', pad_token='<pad>', cls_token='</s>', mask_token='<special1>', additional_special_tokens=['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>'], lang2id=None, id2lang=None, **kwargs):
do_lowercase_and_remove_accent = kwargs.pop('do_lowercase_and_remove_accent', None)
if do_lowercase_and_remove_accent is not None:
logger.warning("`do_lowercase_and_remove_accent` is passed as a keyword argument, but this won't do anything. `FlaubertTokenizer` will always set it to `False`.")
self.do_lowercase_and_remove_accent = False
self.do_lowercase = do_lowercase
try:
import sacremoses
except ImportError:
raise ImportError('You need to install sacremoses to use FlaubertTokenizer. See https://pypi.org/project/sacremoses/ for installation.')
self.sm = sacremoses
self.cache_moses_punct_normalizer = {}
self.cache_moses_tokenizer = {}
self.lang_with_custom_tokenizer = {'zh', 'th', 'ja'}
self.lang2id = lang2id
self.id2lang = id2lang
if lang2id is not None and id2lang is not None:
assert len(lang2id) == len(id2lang)
self.ja_word_tokenizer = None
self.zh_word_tokenizer = None
with open(vocab_file, encoding='utf-8') as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file, encoding='utf-8') as merges_handle:
merges = merges_handle.read().split('\n')[:-1]
merges = [tuple(merge.split()[:2]) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
super().__init__(do_lowercase=do_lowercase, unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, lang2id=lang2id, id2lang=id2lang, **kwargs)
@property
def do_lower_case(self):
return self.do_lowercase_and_remove_accent
def moses_punct_norm(self, text, lang):
if lang not in self.cache_moses_punct_normalizer:
punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
self.cache_moses_punct_normalizer[lang] = punct_normalizer
else:
punct_normalizer = self.cache_moses_punct_normalizer[lang]
return punct_normalizer.normalize(text)
def moses_tokenize(self, text, lang):
if lang not in self.cache_moses_tokenizer:
moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
self.cache_moses_tokenizer[lang] = moses_tokenizer
else:
moses_tokenizer = self.cache_moses_tokenizer[lang]
return moses_tokenizer.tokenize(text, return_str=False, escape=False)
def moses_pipeline(self, text, lang):
text = replace_unicode_punct(text)
text = self.moses_punct_norm(text, lang)
text = remove_non_printing_char(text)
return text
def ja_tokenize(self, text):
if self.ja_word_tokenizer is None:
try:
import Mykytea
self.ja_word_tokenizer = Mykytea.Mykytea(f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin")
except (AttributeError, ImportError):
logger.error("Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper (https://github.com/chezou/Mykytea-python) with the following steps")
logger.error('1. git clone git@github.com:neubig/kytea.git && cd kytea')
logger.error('2. autoreconf -i')
logger.error('3. ./configure --prefix=$HOME/local')
logger.error('4. make && make install')
logger.error('5. pip install kytea')
raise
return list(self.ja_word_tokenizer.getWS(text))
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
word = tuple(token[:-1]) + (token[-1] + '</w>',)
if token in self.cache:
return self.cache[token]
pairs = get_pairs(word)
if not pairs:
return token + '</w>'
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
if word == '\n </w>':
word = '\n</w>'
self.cache[token] = word
return word
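    # Illustrative trace (not part of the original source, assuming a hypothetical merge table):
    # if ('l', 'o') is the best-ranked pair present and ('lo', 'w</w>') is also in bpe_ranks,
    # bpe('low') rewrites ('l', 'o', 'w</w>') -> ('lo', 'w</w>') -> ('low</w>',),
    # returns 'low</w>' and caches it under the raw token 'low'.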
def preprocess_text(self, text):
text = text.replace('``', '"').replace("''", '"')
text = convert_to_unicode(text)
text = unicodedata.normalize('NFC', text)
if self.do_lowercase:
text = text.lower()
return text
def _tokenize(self, text, bypass_tokenizer=False):
"""
Tokenize a string given language code using Moses.
Details of tokenization:
- [sacremoses](https://github.com/alvations/sacremoses): port of Moses
- Install with `pip install sacremoses`
Args:
            - bypass_tokenizer (`bool`, *optional*, defaults to `False`): Allow users to preprocess and tokenize
              the sentences externally. If `True`, only BPE is applied.
Returns:
List of tokens.
"""
lang = 'fr'
if lang and self.lang2id and (lang not in self.lang2id):
logger.error('Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model.')
if bypass_tokenizer:
text = text.split()
else:
text = self.preprocess_text(text)
text = self.moses_pipeline(text, lang=lang)
text = self.moses_tokenize(text, lang=lang)
split_tokens = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(token).split(' ')))
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = ''.join(tokens).replace('</w>', ' ').strip()
return out_string
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
bos = [self.bos_token_id]
sep = [self.sep_token_id]
if token_ids_1 is None:
return bos + token_ids_0 + sep
return bos + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is not None:
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
return [1] + [0] * len(token_ids_0) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
index = 0
with open(merge_file, 'w', encoding='utf-8') as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
index = token_index
writer.write(' '.join(bpe_tokens) + '\n')
index += 1
return (vocab_file, merge_file)
def __getstate__(self):
state = self.__dict__.copy()
state['sm'] = None
return state
def __setstate__(self, d):
self.__dict__ = d
try:
import sacremoses
except ImportError:
raise ImportError('You need to install sacremoses to use XLMTokenizer. See https://pypi.org/project/sacremoses/ for installation.')
self.sm = sacremoses
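A minimal usage sketch for the tokenizer above, assuming the `flaubert/flaubert_base_cased` checkpoint (any Flaubert checkpoint shipping the vocabulary and merges files would work the same way):

```python
>>> from transformers import FlaubertTokenizer

>>> tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
>>> tokens = tokenizer.tokenize("Bonjour le monde !")  # Moses pipeline + BPE; word-final pieces end in "</w>"
>>> ids = tokenizer.convert_tokens_to_ids(tokens)
>>> tokenizer.build_inputs_with_special_tokens(ids)  # [<s>] + ids + [</s>]
>>> tokenizer.get_special_tokens_mask(ids)  # [1, 0, ..., 0, 1]
```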
| class_skeleton:
class FlaubertTokenizer(PreTrainedTokenizer):
'''
Construct a Flaubert tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:
- Moses preprocessing and tokenization.
- Normalizing all inputs text.
- The arguments `special_tokens` and the function `set_special_tokens`, can be used to add additional symbols (like
"__classify__") to a vocabulary.
- The argument `do_lowercase` controls lower casing (automatically set for pretrained vocabularies).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Vocabulary file.
merges_file (`str`):
Merges file.
do_lowercase (`bool`, *optional*, defaults to `False`):
Controls lower casing.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"</s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"<special1>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`List[str]`, *optional*, defaults to `['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']`):
List of additional special tokens.
lang2id (`Dict[str, int]`, *optional*):
Dictionary mapping languages string identifiers to their IDs.
id2lang (`Dict[int, str]`, *optional*):
Dictionary mapping language IDs to their string identifiers.
'''
def __init__(self, vocab_file, merges_file, do_lowercase=False, unk_token='<unk>', bos_token='<s>', sep_token='</s>', pad_token='<pad>', cls_token='</s>', mask_token='<special1>', additional_special_tokens=['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>'], lang2id=None, id2lang=None, **kwargs):
pass
@property
def do_lower_case(self):
pass
def moses_punct_norm(self, text, lang):
pass
def moses_tokenize(self, text, lang):
pass
def moses_pipeline(self, text, lang):
pass
def ja_tokenize(self, text):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def bpe(self, token):
pass
def preprocess_text(self, text):
pass
def _tokenize(self, text, bypass_tokenizer=False):
'''
Tokenize a string given language code using Moses.
Details of tokenization:
- [sacremoses](https://github.com/alvations/sacremoses): port of Moses
- Install with `pip install sacremoses`
Args:
            - bypass_tokenizer (`bool`, *optional*, defaults to `False`): Allow users to preprocess and tokenize
              the sentences externally. If `True`, only BPE is applied.
Returns:
List of tokens.
'''
pass
def _convert_token_to_id(self, token):
        '''Converts a token (str) to an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
        '''Converts an index (integer) to a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
        '''Converts a sequence of tokens (string) into a single string.'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
| total_program_units: 22
| total_doc_str: 7
| AvgCountLine: 18
| AvgCountLineBlank: 2
| AvgCountLineCode: 13
| AvgCountLineComment: 3
| AvgCyclomatic: 3
| CommentToCodeRatio: 0.47
| CountClassBase: 1
| CountClassCoupled: 13
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 20
| CountDeclInstanceVariable: 15
| CountDeclMethod: 20
| CountDeclMethodAll: 109
| CountLine: 443
| CountLineBlank: 66
| CountLineCode: 257
| CountLineCodeDecl: 102
| CountLineCodeExe: 199
| CountLineComment: 120
| CountStmt: 180
| CountStmtDecl: 64
| CountStmtExe: 156
| MaxCyclomatic: 10
| MaxInheritanceTree: 3
| MaxNesting: 3
| SumCyclomatic: 51
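A pattern worth noting across these metric rows (inferred from the values themselves, not documented in this dump): `CommentToCodeRatio` matches `CountLineComment / CountLineCode` rounded to two decimals. A quick check against this record and the next one:

```python
>>> round(120 / 257, 2)  # FlaubertTokenizer: CountLineComment / CountLineCode
0.47
>>> round(95 / 173, 2)  # FlavaConfig (the next record)
0.55
```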
id: 2,532 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/configuration_flava.py | class_name: transformers.models.flava.configuration_flava.FlavaConfig | human_written_code:
from ...configuration_utils import PretrainedConfig
from typing import Any, Optional
class FlavaConfig(PretrainedConfig):
"""
[`FlavaConfig`] is the configuration class to store the configuration of a [`FlavaModel`]. It is used to
instantiate FLAVA model according to the specified arguments, defining the text model, image model, image codebook
and multimodal model configs. Instantiating a configuration with the defaults will yield a similar configuration to
that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`FlavaTextConfig`].
image_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`FlavaImageConfig`].
multimodal_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`FlavaMultimodalConfig`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
        projection_dim (`int`, *optional*, defaults to 768):
            Dimensionality of text and image projection layers.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
The initial value of the *logit_scale* parameter. Default is used as per the original FLAVA/CLIP
implementation.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
ce_ignore_index (`int`, *optional*, defaults to -100):
Cross entropy index to ignore.
mim_weight (`float`, *optional*, defaults to 1.0):
Weight to be assigned to MIM (Masked Image Modeling) unimodal loss
mlm_weight (`float`, *optional*, defaults to 1.0):
Weight to be assigned to MLM (Masked Language Modeling) unimodal loss
global_contrastive_weight (`float`, *optional*, defaults to 1.0):
Weight to be assigned to global contrastive cross-alignment loss.
itm_weight (`float`, *optional*, defaults to 1.0):
Weight to be assigned to image-text matching multimodal loss.
mmm_image_weight (`float`, *optional*, defaults to 1.0):
Weight to be assigned to MMM loss's image part.
mmm_text_weight (`float`, *optional*, defaults to 1.0):
Weight to be assigned to MMM loss's text part.
global_backprop_contrastive (`bool`, *optional*, defaults to `True`):
            Whether to use global backpropagation through all workers in contrastive loss.
skip_unmasked_multimodal_encoder (`bool`, *optional*, defaults to `True`):
Whether to skip running unmasked multimodal encoder whose outputs are not used by FLAVA losses.
return_loss (`bool`, *optional*, defaults to `True`):
Whether to return loss or not
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import FlavaConfig, FlavaModel, FlavaForPreTraining
>>> # Initializing a FlavaConfig with style configuration
>>> configuration = FlavaConfig()
>>> # Initializing a FlavaModel and FlavaForPreTraining model (with random weights) from the style configuration
>>> model = FlavaModel(configuration)
>>> model_pre = FlavaForPreTraining(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> configuration_pre = model_pre.config
```
"""
model_type = 'flava'
sub_configs = {'text_config': FlavaTextConfig, 'image_config': FlavaImageConfig, 'multimodal_config': FlavaMultimodalConfig, 'image_codebook_config': FlavaImageCodebookConfig}
def __init__(self, image_config: Optional[dict[str, Any]]=None, text_config: Optional[dict[str, Any]]=None, multimodal_config: Optional[dict[str, Any]]=None, image_codebook_config: Optional[dict[str, Any]]=None, hidden_size: int=768, layer_norm_eps: float=1e-12, projection_dim: int=768, init_codebook: bool=True, logit_scale_init_value: float=2.6592, initializer_range: float=0.02, ce_ignore_index: int=-100, mim_weight: float=1.0, mlm_weight: float=1.0, global_contrastive_weight: float=1.0, itm_weight: float=1.0, mmm_image_weight: float=1.0, mmm_text_weight: float=1.0, global_backprop_contrastive: bool=True, skip_unmasked_multimodal_encoder: bool=True, return_loss: bool=True, **kwargs):
text_config_dict = kwargs.pop('text_config_dict', None)
image_config_dict = kwargs.pop('image_config_dict', None)
multimodal_config_dict = kwargs.pop('multimodal_config_dict', None)
image_codebook_config_dict = kwargs.pop('image_codebook_config_dict', None)
super().__init__(**kwargs)
if text_config_dict is not None:
if text_config is None:
text_config = {}
_text_config_dict = FlavaTextConfig(**text_config_dict).to_dict()
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and (key not in ['transformers_version']):
if key in text_config_dict:
message = f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict["{key}"]` will be used instead.'
else:
message = f'`text_config_dict` is provided which will be used to initialize `FlavaTextConfig`. The value `text_config["{key}"]` will be overridden.'
logger.info(message)
text_config.update(_text_config_dict)
if image_config_dict is not None:
if image_config is None:
image_config = {}
_image_config_dict = FlavaImageConfig(**image_config_dict).to_dict()
if 'id2label' in _image_config_dict:
_image_config_dict['id2label'] = {str(key): value for key, value in _image_config_dict['id2label'].items()}
for key, value in _image_config_dict.items():
if key in image_config and value != image_config[key] and (key not in ['transformers_version']):
if key in image_config_dict:
message = f'`{key}` is found in both `image_config_dict` and `image_config` but with different values. The value `image_config_dict["{key}"]` will be used instead.'
else:
message = f'`image_config_dict` is provided which will be used to initialize `FlavaImageConfig`. The value `image_config["{key}"]` will be overridden.'
logger.info(message)
image_config.update(_image_config_dict)
if multimodal_config_dict is not None:
if multimodal_config is None:
multimodal_config = {}
_multimodal_config_dict = FlavaMultimodalConfig(**multimodal_config_dict).to_dict()
for key, value in _multimodal_config_dict.items():
if key in multimodal_config and value != multimodal_config[key] and (key not in ['transformers_version']):
if key in multimodal_config_dict:
message = f'`{key}` is found in both `multimodal_config_dict` and `multimodal_config` but with different values. The value `multimodal_config_dict["{key}"]` will be used instead.'
else:
message = f'`multimodal_config_dict` is provided which will be used to initialize `FlavaMultimodalConfig`. The value `multimodal_config["{key}"]` will be overridden.'
logger.info(message)
multimodal_config.update(_multimodal_config_dict)
if image_codebook_config_dict is not None:
if image_codebook_config is None:
image_codebook_config = {}
_image_codebook_config_dict = FlavaImageCodebookConfig(**image_codebook_config_dict).to_dict()
for key, value in _image_codebook_config_dict.items():
if key in image_codebook_config and value != image_codebook_config[key] and (key not in ['transformers_version']):
if key in image_codebook_config_dict:
message = f'`{key}` is found in both `image_codebook_config_dict` and `image_codebook_config` but with different values. The value `image_codebook_config_dict["{key}"]` will be used instead.'
else:
message = f'`image_codebook_config_dict` is provided which will be used to initialize `FlavaImageCodebookConfig`. The value `image_codebook_config["{key}"]` will be overridden.'
logger.info(message)
image_codebook_config.update(_image_codebook_config_dict)
if image_config is None:
image_config = {}
            logger.info('`image_config` is `None`. Initializing the `FlavaImageConfig` with default values.')
if text_config is None:
text_config = {}
logger.info('`text_config` is `None`. Initializing the `FlavaTextConfig` with default values.')
if multimodal_config is None:
multimodal_config = {}
            logger.info('`multimodal_config` is `None`. Initializing the `FlavaMultimodalConfig` with default values.')
if image_codebook_config is None:
image_codebook_config = {}
            logger.info('`image_codebook_config` is `None`. Initializing the `FlavaImageCodebookConfig` with default values.')
self.image_config = FlavaImageConfig(**image_config)
self.text_config = FlavaTextConfig(**text_config)
self.multimodal_config = FlavaMultimodalConfig(**multimodal_config)
self.image_codebook_config = FlavaImageCodebookConfig(**image_codebook_config)
self.projection_dim = projection_dim
self.init_codebook = init_codebook
self.hidden_size = hidden_size
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.logit_scale_init_value = logit_scale_init_value
self.initializer_factor = 1.0
self.ce_ignore_index = ce_ignore_index
self.mim_weight = mim_weight
self.mlm_weight = mlm_weight
self.global_contrastive_weight = global_contrastive_weight
self.itm_weight = itm_weight
self.mmm_image_weight = mmm_image_weight
self.mmm_text_weight = mmm_text_weight
self.global_backprop_contrastive = global_backprop_contrastive
self.skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder
self.return_loss = return_loss
@classmethod
def from_configs(cls, image_config: FlavaImageConfig, text_config: FlavaTextConfig, multimodal_config: FlavaMultimodalConfig, image_codebook_config: FlavaImageCodebookConfig, **kwargs):
"""
Instantiate a [`FlavaConfig`] (or a derived class) from flava text model configuration, flava image model
configuration, flava multimodal model and flava codebook model configuration.
Returns:
[`FlavaConfig`]: An instance of a configuration object
"""
return cls(image_config=image_config.to_dict(), text_config=text_config.to_dict(), multimodal_config=multimodal_config.to_dict(), image_codebook_config=image_codebook_config.to_dict(), **kwargs)
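A short sketch of `from_configs`, using only classes defined in this file and default sub-configurations for brevity:

```python
>>> from transformers import FlavaConfig, FlavaImageConfig, FlavaTextConfig
>>> from transformers import FlavaMultimodalConfig, FlavaImageCodebookConfig

>>> config = FlavaConfig.from_configs(
...     image_config=FlavaImageConfig(),
...     text_config=FlavaTextConfig(),
...     multimodal_config=FlavaMultimodalConfig(),
...     image_codebook_config=FlavaImageCodebookConfig(),
... )
>>> config.projection_dim
768
```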
| class_skeleton:
class FlavaConfig(PretrainedConfig):
'''
[`FlavaConfig`] is the configuration class to store the configuration of a [`FlavaModel`]. It is used to
instantiate FLAVA model according to the specified arguments, defining the text model, image model, image codebook
and multimodal model configs. Instantiating a configuration with the defaults will yield a similar configuration to
that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`FlavaTextConfig`].
image_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`FlavaImageConfig`].
multimodal_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`FlavaMultimodalConfig`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
        projection_dim (`int`, *optional*, defaults to 768):
            Dimensionality of text and image projection layers.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
The initial value of the *logit_scale* parameter. Default is used as per the original FLAVA/CLIP
implementation.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
ce_ignore_index (`int`, *optional*, defaults to -100):
Cross entropy index to ignore.
mim_weight (`float`, *optional*, defaults to 1.0):
Weight to be assigned to MIM (Masked Image Modeling) unimodal loss
mlm_weight (`float`, *optional*, defaults to 1.0):
Weight to be assigned to MLM (Masked Language Modeling) unimodal loss
global_contrastive_weight (`float`, *optional*, defaults to 1.0):
Weight to be assigned to global contrastive cross-alignment loss.
itm_weight (`float`, *optional*, defaults to 1.0):
Weight to be assigned to image-text matching multimodal loss.
mmm_image_weight (`float`, *optional*, defaults to 1.0):
Weight to be assigned to MMM loss's image part.
mmm_text_weight (`float`, *optional*, defaults to 1.0):
Weight to be assigned to MMM loss's text part.
global_backprop_contrastive (`bool`, *optional*, defaults to `True`):
            Whether to use global backpropagation through all workers in contrastive loss.
skip_unmasked_multimodal_encoder (`bool`, *optional*, defaults to `True`):
Whether to skip running unmasked multimodal encoder whose outputs are not used by FLAVA losses.
return_loss (`bool`, *optional*, defaults to `True`):
Whether to return loss or not
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import FlavaConfig, FlavaModel, FlavaForPreTraining
>>> # Initializing a FlavaConfig with style configuration
>>> configuration = FlavaConfig()
>>> # Initializing a FlavaModel and FlavaForPreTraining model (with random weights) from the style configuration
>>> model = FlavaModel(configuration)
>>> model_pre = FlavaForPreTraining(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> configuration_pre = model_pre.config
```
'''
def __init__(self, image_config: Optional[dict[str, Any]]=None, text_config: Optional[dict[str, Any]]=None, multimodal_config: Optional[dict[str, Any]]=None, image_codebook_config: Optional[dict[str, Any]]=None, hidden_size: int=768, layer_norm_eps: float=1e-12, projection_dim: int=768, init_codebook: bool=True, logit_scale_init_value: float=2.6592, initializer_range: float=0.02, ce_ignore_index: int=-100, mim_weight: float=1.0, mlm_weight: float=1.0, global_contrastive_weight: float=1.0, itm_weight: float=1.0, mmm_image_weight: float=1.0, mmm_text_weight: float=1.0, global_backprop_contrastive: bool=True, skip_unmasked_multimodal_encoder: bool=True, return_loss: bool=True, **kwargs):
pass
@classmethod
def from_configs(cls, image_config: FlavaImageConfig, text_config: FlavaTextConfig, multimodal_config: FlavaMultimodalConfig, image_codebook_config: FlavaImageCodebookConfig, **kwargs):
'''
Instantiate a [`FlavaConfig`] (or a derived class) from flava text model configuration, flava image model
configuration, flava multimodal model and flava codebook model configuration.
Returns:
[`FlavaConfig`]: An instance of a configuration object
'''
pass
| total_program_units: 4
| total_doc_str: 2
| AvgCountLine: 112
| AvgCountLineBlank: 13
| AvgCountLineCode: 82
| AvgCountLineComment: 18
| AvgCyclomatic: 14
| CommentToCodeRatio: 0.55
| CountClassBase: 1
| CountClassCoupled: 10
| CountClassCoupledModified: 4
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 21
| CountDeclMethod: 2
| CountDeclMethodAll: 2
| CountLine: 304
| CountLineBlank: 36
| CountLineCode: 173
| CountLineCodeDecl: 67
| CountLineCodeExe: 139
| CountLineComment: 95
| CountStmt: 90
| CountStmtDecl: 36
| CountStmtExe: 87
| MaxCyclomatic: 26
| MaxInheritanceTree: 1
| MaxNesting: 4
| SumCyclomatic: 27
id: 2,533 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/configuration_flava.py | class_name: transformers.models.flava.configuration_flava.FlavaImageCodebookConfig | human_written_code:
from ...configuration_utils import PretrainedConfig
class FlavaImageCodebookConfig(PretrainedConfig):
    """
    [`FlavaImageCodebookConfig`] is the configuration class to store the configuration of a [`FlavaImageCodebook`]. It
    is used to instantiate an FLAVA model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
    [facebook/flava-image-codebook](https://huggingface.co/facebook/flava-image-codebook) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        num_groups (`int`, *optional*, defaults to 4):
            Number of groups to be created. This parameter as of now doesn't affect the model and is used for some
            internal calculation and estimations.
        input_channels (`int`, *optional*, defaults to 3):
            Number of channels in the image to be passed.
        num_blocks_per_group (`int`, *optional*, defaults to 2):
            Number of conv-based blocks per group.
        hidden_size (`int`, *optional*, defaults to 256):
            Size of hidden dim for the blocks.
        vocab_size (`int`, *optional*, defaults to 8192):
            Size of the output vocabulary for the codebook.
        freeze (`bool`, defaults to `True`):
            Whether to freeze the weights of the model.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        kwargs (*optional*):
            Dictionary of keyword arguments.
    Example:
    ```python
    >>> from transformers import FlavaImageCodebookConfig, FlavaImageCodebook
    >>> # Initializing a FlavaImageCodebook with style configuration
    >>> configuration = FlavaImageCodebookConfig()
    >>> # Initializing a FlavaImageCodebook model (with random weights) from the style configuration
    >>> model = FlavaImageCodebook(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = 'flava_image_codebook'
    base_config_key = 'image_codebook_config'
    def __init__(self, num_groups: int=4, input_channels: int=3, num_blocks_per_group: int=2, hidden_size: int=256, vocab_size: int=8192, freeze: bool=True, initializer_range: float=0.02, **kwargs):
super().__init__(**kwargs)
self.num_groups = num_groups
self.input_channels = input_channels
self.num_blocks_per_group = num_blocks_per_group
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.freeze = freeze
self.initializer_range = initializer_range
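A small sketch of how this codebook config plugs into the composite `FlavaConfig` above (the `hidden_size=128` override is purely illustrative):

```python
>>> from transformers import FlavaConfig, FlavaImageCodebookConfig

>>> codebook_config = FlavaImageCodebookConfig(hidden_size=128)
>>> config = FlavaConfig(image_codebook_config=codebook_config.to_dict())
>>> config.image_codebook_config.hidden_size
128
```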
| class_skeleton:
class FlavaImageCodebookConfig(PretrainedConfig):
    def __init__(self, num_groups: int=4, input_channels: int=3, num_blocks_per_group: int=2, hidden_size: int=256, vocab_size: int=8192, freeze: bool=True, initializer_range: float=0.02, **kwargs):
pass
| total_program_units: 2
| total_doc_str: 0
| AvgCountLine: 19
| AvgCountLineBlank: 0
| AvgCountLineCode: 19
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 1.64
| CountClassBase: 1
| CountClassCoupled: 3
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 7
| CountDeclMethod: 1
| CountDeclMethodAll: 1
| CountLine: 66
| CountLineBlank: 8
| CountLineCode: 22
| CountLineCodeDecl: 21
| CountLineCodeExe: 10
| CountLineComment: 36
| CountStmt: 12
| CountStmtDecl: 11
| CountStmtExe: 10
| MaxCyclomatic: 1
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 1
id: 2,534 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/configuration_flava.py | class_name: transformers.models.flava.configuration_flava.FlavaImageConfig | human_written_code:
from ...configuration_utils import PretrainedConfig
class FlavaImageConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`FlavaImageModel`]. It is used to instantiate an
FLAVA model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
[facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
mask_token (`bool`, *optional*, defaults to `True`):
Whether to use a mask token or not. Used in MIM (Masked Image Modeling) loss for FLAVA.
vocab_size (`int`, *optional*, defaults to 8192):
Vocabulary size of the [`FlavaImageCodebook`] used in conjunction with [`FlavaImageModel`] for MIM (Masked
Image Modeling) loss for FLAVA.
Example:
```python
>>> from transformers import FlavaImageConfig, FlavaImageModel
>>> # Initializing a FlavaImageModel with style configuration
>>> configuration = FlavaImageConfig()
>>> # Initializing a FlavaImageModel model (with random weights) from the style configuration
>>> model = FlavaImageModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'flava_image_model'
base_config_key = 'image_config'
    def __init__(self, hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, layer_norm_eps: float=1e-12, image_size: int=224, patch_size: int=16, num_channels: int=3, qkv_bias: bool=True, mask_token: bool=True, vocab_size: int=8192, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.mask_token = mask_token
self.vocab_size = vocab_size
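With the defaults above, the patch grid works out to 14x14 = 196 patches per image; a quick sanity check:

```python
>>> from transformers import FlavaImageConfig

>>> config = FlavaImageConfig()  # image_size=224, patch_size=16
>>> (config.image_size // config.patch_size) ** 2
196
```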
| class_skeleton:
class FlavaImageConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`FlavaImageModel`]. It is used to instantiate an
FLAVA model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
[facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
mask_token (`bool`, *optional*, defaults to `True`):
Whether to use a mask token or not. Used in MIM (Masked Image Modeling) loss for FLAVA.
vocab_size (`int`, *optional*, defaults to 8192):
Vocabulary size of the [`FlavaImageCodebook`] used in conjunction with [`FlavaImageModel`] for MIM (Masked
Image Modeling) loss for FLAVA.
Example:
```python
>>> from transformers import FlavaImageConfig, FlavaImageModel
>>> # Initializing a FlavaImageModel with style configuration
>>> configuration = FlavaImageConfig()
>>> # Initializing a FlavaImageModel model (with random weights) from the style configuration
>>> model = FlavaImageModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
    def __init__(self, hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, layer_norm_eps: float=1e-12, image_size: int=224, patch_size: int=16, num_channels: int=3, qkv_bias: bool=True, mask_token: bool=True, vocab_size: int=8192, **kwargs):
pass
| total_program_units: 2
| total_doc_str: 1
| AvgCountLine: 36
| AvgCountLineBlank: 1
| AvgCountLineCode: 35
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 1.32
| CountClassBase: 1
| CountClassCoupled: 4
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 15
| CountDeclMethod: 1
| CountDeclMethodAll: 1
| CountLine: 100
| CountLineBlank: 12
| CountLineCode: 38
| CountLineCodeDecl: 37
| CountLineCodeExe: 18
| CountLineComment: 50
| CountStmt: 20
| CountStmtDecl: 19
| CountStmtExe: 18
| MaxCyclomatic: 1
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 1
id: 2,535 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/configuration_flava.py | class_name: transformers.models.flava.configuration_flava.FlavaMultimodalConfig | human_written_code:
from ...configuration_utils import PretrainedConfig
class FlavaMultimodalConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`FlavaMultimodalModel`]. It is used to instantiate
an FLAVA model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
[facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
use_cls_token (`bool`, *optional*, defaults to `True`):
Whether to use an extra CLS token for multimodal settings. Usually needed by the FLAVA model.
Example:
```python
>>> from transformers import FlavaMultimodalConfig, FlavaMultimodalModel
>>> # Initializing a FlavaMultimodalModel with style configuration
>>> configuration = FlavaMultimodalConfig()
>>> # Initializing a FlavaMultimodalModel model (with random weights) from the style configuration
>>> model = FlavaMultimodalModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'flava_multimodal_model'
base_config_key = 'multimodal_config'
    def __init__(self, hidden_size: int=768, num_hidden_layers: int=6, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, layer_norm_eps: float=1e-12, qkv_bias: bool=True, use_cls_token: bool=True, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.use_cls_token = use_cls_token
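A brief check of the default depth, which is shallower by default than the 12-layer unimodal encoders:

```python
>>> from transformers import FlavaMultimodalConfig

>>> FlavaMultimodalConfig().num_hidden_layers
6
```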
| class_skeleton:
class FlavaMultimodalConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`FlavaMultimodalModel`]. It is used to instantiate
an FLAVA model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
[facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
use_cls_token (`bool`, *optional*, defaults to `True`):
Whether to use an extra CLS token for multimodal settings. Usually needed by the FLAVA model.
Example:
```python
>>> from transformers import FlavaMultimodalConfig, FlavaMultimodalModel
>>> # Initializing a FlavaMultimodalModel with style configuration
>>> configuration = FlavaMultimodalConfig()
>>> # Initializing a FlavaMultimodalModel model (with random weights) from the style configuration
>>> model = FlavaMultimodalModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
    def __init__(self, hidden_size: int=768, num_hidden_layers: int=6, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, layer_norm_eps: float=1e-12, qkv_bias: bool=True, use_cls_token: bool=True, **kwargs):
pass
| total_program_units: 2
| total_doc_str: 1
| AvgCountLine: 28
| AvgCountLineBlank: 1
| AvgCountLineCode: 27
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 1.37
| CountClassBase: 1
| CountClassCoupled: 4
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 11
| CountDeclMethod: 1
| CountDeclMethodAll: 1
| CountLine: 84
| CountLineBlank: 13
| CountLineCode: 30
| CountLineCodeDecl: 29
| CountLineCodeExe: 14
| CountLineComment: 41
| CountStmt: 16
| CountStmtDecl: 15
| CountStmtExe: 14
| MaxCyclomatic: 1
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 1
id: 2,536 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/configuration_flava.py | class_name: transformers.models.flava.configuration_flava.FlavaTextConfig | human_written_code:
from ...configuration_utils import PretrainedConfig
class FlavaTextConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`FlavaTextModel`]. It is used to instantiate an
FLAVA model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
[facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`FlavaTextModel`].
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`FlavaTextModel`]. Note that even though
text encoder allows `token_type_ids`'s value as 2, for text-only pretraining and fine-tuning, only 1 is
used similar to RoBERTa.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048). For VL, max_length passed to model is 77.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
Example:
```python
>>> from transformers import FlavaTextConfig, FlavaTextModel
>>> # Initializing a FlavaTextModel with style configuration
>>> configuration = FlavaTextConfig()
>>> # Initializing a FlavaTextModel model (with random weights) from the style configuration
>>> model = FlavaTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'flava_text_model'
base_config_key = 'text_config'
def __init__(self, vocab_size: int=30522, type_vocab_size: int=2, max_position_embeddings: int=512, position_embedding_type: str='absolute', hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, layer_norm_eps: float=1e-12, pad_token_id: int=0, qkv_bias: bool=True, **kwargs):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.type_vocab_size = type_vocab_size
self.max_position_embeddings = max_position_embeddings
self.position_embedding_type = position_embedding_type
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.pad_token_id = pad_token_id
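A quick look at the defaults behind the docstring notes above (512 positions are allocated even though, per the docstring, VL training typically passes max_length=77):

```python
>>> from transformers import FlavaTextConfig

>>> config = FlavaTextConfig()
>>> config.max_position_embeddings, config.type_vocab_size
(512, 2)
```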
| class_skeleton:
class FlavaTextConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`FlavaTextModel`]. It is used to instantiate an
FLAVA model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
[facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`FlavaTextModel`].
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`FlavaTextModel`]. Note that even though
text encoder allows `token_type_ids`'s value as 2, for text-only pretraining and fine-tuning, only 1 is
used similar to RoBERTa.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048). For VL, max_length passed to model is 77.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
Example:
```python
>>> from transformers import FlavaTextConfig, FlavaTextModel
>>> # Initializing a FlavaTextModel with style configuration
>>> configuration = FlavaTextConfig()
>>> # Initializing a FlavaTextModel model (with random weights) from the style configuration
>>> model = FlavaTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size: int=30522, type_vocab_size: int=2, max_position_embeddings: int=512, position_embedding_type: str='absolute', hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, layer_norm_eps: float=1e-12, pad_token_id: int=0, qkv_bias: bool=True, **kwargs):
pass
| total_program_units: 2
| total_doc_str: 1
| AvgCountLine: 36
| AvgCountLineBlank: 1
| AvgCountLineCode: 35
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 1.61
| CountClassBase: 1
| CountClassCoupled: 5
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 15
| CountDeclMethod: 1
| CountDeclMethodAll: 1
| CountLine: 111
| CountLineBlank: 12
| CountLineCode: 38
| CountLineCodeDecl: 37
| CountLineCodeExe: 18
| CountLineComment: 61
| CountStmt: 20
| CountStmtDecl: 19
| CountStmtExe: 18
| MaxCyclomatic: 1
| MaxInheritanceTree: 1
| MaxNesting: 0
| SumCyclomatic: 1
id: 2,537 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/feature_extraction_flava.py | class_name: transformers.models.flava.feature_extraction_flava.FlavaFeatureExtractor | human_written_code:
from ...utils.import_utils import requires
import warnings
from .image_processing_flava import FlavaImageProcessor
@requires(backends=('vision',))
class FlavaFeatureExtractor(FlavaImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use FlavaImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
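Instantiating the deprecated class still works but emits a `FutureWarning`; a brief sketch of the migration path:

```python
>>> import warnings
>>> from transformers import FlavaFeatureExtractor, FlavaImageProcessor

>>> with warnings.catch_warnings(record=True):
...     warnings.simplefilter("always")
...     legacy = FlavaFeatureExtractor()  # warns: use FlavaImageProcessor instead
>>> processor = FlavaImageProcessor()  # drop-in replacement
```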
| class_skeleton:
@requires(backends=('vision',))
class FlavaFeatureExtractor(FlavaImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
| total_program_units: 3
| total_doc_str: 0
| AvgCountLine: 7
| AvgCountLineBlank: 0
| AvgCountLineCode: 7
| AvgCountLineComment: 0
| AvgCyclomatic: 1
| CommentToCodeRatio: 0
| CountClassBase: 1
| CountClassCoupled: 2
| CountClassCoupledModified: 0
| CountClassDerived: 0
| CountDeclInstanceMethod: 1
| CountDeclInstanceVariable: 0
| CountDeclMethod: 1
| CountDeclMethodAll: 28
| CountLine: 8
| CountLineBlank: 0
| CountLineCode: 8
| CountLineCodeDecl: 2
| CountLineCodeExe: 6
| CountLineComment: 0
| CountStmt: 4
| CountStmtDecl: 2
| CountStmtExe: 2
| MaxCyclomatic: 1
| MaxInheritanceTree: 4
| MaxNesting: 0
| SumCyclomatic: 1
id: 2,538 | repository_name: huggingface/pytorch-pretrained-BERT | file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/image_processing_flava.py | class_name: transformers.models.flava.image_processing_flava.FlavaImageProcessor | human_written_code:
import numpy as np
from collections.abc import Iterable
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...utils.import_utils import requires
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from functools import lru_cache
from typing import Any, Optional, Union
@requires(backends=('vision',))
class FlavaImageProcessor(BaseImageProcessor):
"""
Constructs a Flava image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in `preprocess`.
        size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the image after resizing. Can be overridden by the `size` parameter in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in
`preprocess`.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the images. Can be overridden by the `do_center_crop` parameter in `preprocess`.
        crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Size of image after the center crop `(crop_size["height"], crop_size["width"])`. Can be overridden by the
`crop_size` parameter in `preprocess`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in `preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in
`preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in `preprocess`.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
return_image_mask (`bool`, *optional*, defaults to `False`):
Whether to return the image mask. Can be overridden by the `return_image_mask` parameter in `preprocess`.
input_size_patches (`int`, *optional*, defaults to 14):
Number of patches in the image in height and width direction. 14x14 = 196 total patches. Can be overridden
by the `input_size_patches` parameter in `preprocess`.
total_mask_patches (`int`, *optional*, defaults to 75):
Total number of patches that should be masked. Can be overridden by the `total_mask_patches` parameter in
`preprocess`.
mask_group_min_patches (`int`, *optional*, defaults to 16):
Minimum number of patches that should be masked. Can be overridden by the `mask_group_min_patches`
parameter in `preprocess`.
mask_group_max_patches (`int`, *optional*):
Maximum number of patches that should be masked. Can be overridden by the `mask_group_max_patches`
parameter in `preprocess`.
mask_group_min_aspect_ratio (`float`, *optional*, defaults to 0.3):
Minimum aspect ratio of the mask window. Can be overridden by the `mask_group_min_aspect_ratio` parameter
in `preprocess`.
mask_group_max_aspect_ratio (`float`, *optional*):
Maximum aspect ratio of the mask window. Can be overridden by the `mask_group_max_aspect_ratio` parameter
in `preprocess`.
codebook_do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the input for codebook to a certain `codebook_size`. Can be overridden by the
            `codebook_do_resize` parameter in `preprocess`.
        codebook_size (`dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`):
Resize the input for codebook to the given size. Can be overridden by the `codebook_size` parameter in
`preprocess`.
codebook_resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
Resampling filter to use if resizing the codebook image. Can be overridden by the `codebook_resample`
parameter in `preprocess`.
codebook_do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to crop the input for codebook at the center. If the input size is smaller than
`codebook_crop_size` along any edge, the image is padded with 0's and then center cropped. Can be
overridden by the `codebook_do_center_crop` parameter in `preprocess`.
        codebook_crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`):
Desired output size for codebook input when applying center-cropping. Can be overridden by the
`codebook_crop_size` parameter in `preprocess`.
codebook_do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input for codebook by the specified scale `codebook_rescale_factor`. Can be
overridden by the `codebook_do_rescale` parameter in `preprocess`.
codebook_rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Defines the scale factor to use if rescaling the codebook image. Can be overridden by the
`codebook_rescale_factor` parameter in `preprocess`.
codebook_do_map_pixels (`bool`, *optional*, defaults to `True`):
Whether to map the pixel values of the codebook input to (1 - 2e)x + e. Can be overridden by the
`codebook_do_map_pixels` parameter in `preprocess`.
codebook_do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input for codebook with `codebook_image_mean` and `codebook_image_std`. Can
be overridden by the `codebook_do_normalize` parameter in `preprocess`.
codebook_image_mean (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0, 0, 0]`):
The sequence of means for each channel, to be used when normalizing images for codebook. Can be overridden
by the `codebook_image_mean` parameter in `preprocess`.
codebook_image_std (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
The sequence of standard deviations for each channel, to be used when normalizing images for codebook. Can
be overridden by the `codebook_image_std` parameter in `preprocess`.
"""
model_input_names = ['pixel_values']
    def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, Iterable[float]]]=None, image_std: Optional[Union[float, Iterable[float]]]=None, return_image_mask: bool=False, input_size_patches: int=14, total_mask_patches: int=75, mask_group_min_patches: int=16, mask_group_max_patches: Optional[int]=None, mask_group_min_aspect_ratio: float=0.3, mask_group_max_aspect_ratio: Optional[float]=None, return_codebook_pixels: bool=False, codebook_do_resize: bool=True, codebook_size: Optional[dict[str, int]]=None, codebook_resample: int=PILImageResampling.LANCZOS, codebook_do_center_crop: bool=True, codebook_crop_size: Optional[dict[str, int]]=None, codebook_do_rescale: bool=True, codebook_rescale_factor: Union[int, float]=1 / 255, codebook_do_map_pixels: bool=True, codebook_do_normalize: bool=True, codebook_image_mean: Optional[Union[float, Iterable[float]]]=None, codebook_image_std: Optional[Union[float, Iterable[float]]]=None, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 224, 'width': 224}
size = get_size_dict(size)
crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
crop_size = get_size_dict(crop_size, param_name='crop_size')
codebook_size = codebook_size if codebook_size is not None else {'height': 112, 'width': 112}
codebook_size = get_size_dict(codebook_size, param_name='codebook_size')
codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {'height': 112, 'width': 112}
codebook_crop_size = get_size_dict(codebook_crop_size, param_name='codebook_crop_size')
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else FLAVA_IMAGE_MEAN
self.image_std = image_std if image_std is not None else FLAVA_IMAGE_STD
self.return_image_mask = return_image_mask
self.input_size_patches = input_size_patches
self.total_mask_patches = total_mask_patches
self.mask_group_min_patches = mask_group_min_patches
self.mask_group_max_patches = mask_group_max_patches
self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio
self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio
self.return_codebook_pixels = return_codebook_pixels
self.codebook_do_resize = codebook_do_resize
self.codebook_size = codebook_size
self.codebook_resample = codebook_resample
self.codebook_do_center_crop = codebook_do_center_crop
self.codebook_crop_size = codebook_crop_size
self.codebook_do_rescale = codebook_do_rescale
self.codebook_rescale_factor = codebook_rescale_factor
self.codebook_do_map_pixels = codebook_do_map_pixels
self.codebook_do_normalize = codebook_do_normalize
        self.codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else FLAVA_CODEBOOK_MEAN
self.codebook_image_std = codebook_image_std if codebook_image_std is not None else FLAVA_CODEBOOK_STD
@classmethod
def from_dict(cls, image_processor_dict: dict[str, Any], **kwargs):
"""
Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
created using from_dict and kwargs e.g. `FlavaImageProcessor.from_pretrained(checkpoint, codebook_size=600)`
"""
image_processor_dict = image_processor_dict.copy()
if 'codebook_size' in kwargs:
image_processor_dict['codebook_size'] = kwargs.pop('codebook_size')
if 'codebook_crop_size' in kwargs:
image_processor_dict['codebook_crop_size'] = kwargs.pop('codebook_crop_size')
return super().from_dict(image_processor_dict, **kwargs)
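    # Illustrative call: `from_dict` folds the two codebook kwargs into the dict before delegating,
    # so e.g. `FlavaImageProcessor.from_pretrained("facebook/flava-full", codebook_size={"height": 112, "width": 112})`
    # applies the override instead of the value stored in the checkpoint's saved config.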
@lru_cache
def masking_generator(self, input_size_patches, total_mask_patches, mask_group_min_patches, mask_group_max_patches, mask_group_min_aspect_ratio, mask_group_max_aspect_ratio) -> FlavaMaskingGenerator:
return FlavaMaskingGenerator(input_size=input_size_patches, total_mask_patches=total_mask_patches, mask_group_min_patches=mask_group_min_patches, mask_group_max_patches=mask_group_max_patches, mask_group_min_aspect_ratio=mask_group_min_aspect_ratio, mask_group_max_aspect_ratio=mask_group_max_aspect_ratio)
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if 'height' not in size or 'width' not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
output_size = (size['height'], size['width'])
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def map_pixels(self, image: np.ndarray) -> np.ndarray:
return (1 - 2 * LOGIT_LAPLACE_EPS) * image + LOGIT_LAPLACE_EPS
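    # Note: `LOGIT_LAPLACE_EPS` is a module-level constant defined in this file (it does not appear
    # in the import list above). The affine map (1 - 2*eps) * x + eps squeezes rescaled pixels from
    # [0, 1] into [eps, 1 - eps], keeping the dVAE codebook input strictly inside (0, 1).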
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_map_pixels: Optional[bool]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[ChannelDimension]=None) -> np.ndarray:
"""Preprocesses a single image."""
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
if do_map_pixels:
image = self.map_pixels(image)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
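    # Note: the single-image pipeline above applies, in order, resize -> center crop -> rescale ->
    # normalize -> optional logit-Laplace pixel mapping -> channel-dimension conversion; only the
    # codebook branch of `preprocess` enables `do_map_pixels`.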
@filter_out_non_signature_kwargs()
    def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_image_mask: Optional[bool]=None, input_size_patches: Optional[int]=None, total_mask_patches: Optional[int]=None, mask_group_min_patches: Optional[int]=None, mask_group_max_patches: Optional[int]=None, mask_group_min_aspect_ratio: Optional[float]=None, mask_group_max_aspect_ratio: Optional[float]=None, return_codebook_pixels: Optional[bool]=None, codebook_do_resize: Optional[bool]=None, codebook_size: Optional[dict[str, int]]=None, codebook_resample: Optional[int]=None, codebook_do_center_crop: Optional[bool]=None, codebook_crop_size: Optional[dict[str, int]]=None, codebook_do_rescale: Optional[bool]=None, codebook_rescale_factor: Optional[float]=None, codebook_do_map_pixels: Optional[bool]=None, codebook_do_normalize: Optional[bool]=None, codebook_image_mean: Optional[Iterable[float]]=None, codebook_image_std: Optional[Iterable[float]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image.
resample (`int`, *optional*, defaults to `self.resample`):
            Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`.
            Only has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the image values to the [0, 1] range.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_image_mask (`bool`, *optional*, defaults to `self.return_image_mask`):
Whether to return the image mask.
        input_size_patches (`int`, *optional*, defaults to `self.input_size_patches`):
            Number of patches in the image in the height and width direction.
        total_mask_patches (`int`, *optional*, defaults to `self.total_mask_patches`):
            Total number of patches that should be masked.
        mask_group_min_patches (`int`, *optional*, defaults to `self.mask_group_min_patches`):
            Minimum number of patches that should be masked per mask group.
        mask_group_max_patches (`int`, *optional*, defaults to `self.mask_group_max_patches`):
            Maximum number of patches that should be masked per mask group.
        mask_group_min_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_min_aspect_ratio`):
            Minimum aspect ratio of the mask window.
        mask_group_max_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_max_aspect_ratio`):
            Maximum aspect ratio of the mask window.
return_codebook_pixels (`bool`, *optional*, defaults to `self.return_codebook_pixels`):
Whether to return the codebook pixels.
codebook_do_resize (`bool`, *optional*, defaults to `self.codebook_do_resize`):
Whether to resize the codebook pixels.
codebook_size (`dict[str, int]`, *optional*, defaults to `self.codebook_size`):
Size of the codebook pixels.
codebook_resample (`int`, *optional*, defaults to `self.codebook_resample`):
            Resampling filter to use if resizing the codebook pixels. This can be one of the enum
            `PILImageResampling`. Only has an effect if `codebook_do_resize` is set to `True`.
codebook_do_center_crop (`bool`, *optional*, defaults to `self.codebook_do_center_crop`):
Whether to center crop the codebook pixels.
codebook_crop_size (`dict[str, int]`, *optional*, defaults to `self.codebook_crop_size`):
Size of the center crop of the codebook pixels. Only has an effect if `codebook_do_center_crop` is set
to `True`.
        codebook_do_rescale (`bool`, *optional*, defaults to `self.codebook_do_rescale`):
            Whether to rescale the codebook pixel values to the [0, 1] range.
        codebook_rescale_factor (`float`, *optional*, defaults to `self.codebook_rescale_factor`):
            Rescale factor to rescale the codebook pixels by if `codebook_do_rescale` is set to `True`.
        codebook_do_map_pixels (`bool`, *optional*, defaults to `self.codebook_do_map_pixels`):
            Whether to apply the logit-Laplace mapping to the codebook pixel values.
codebook_do_normalize (`bool`, *optional*, defaults to `self.codebook_do_normalize`):
Whether to normalize the codebook pixels.
codebook_image_mean (`float` or `list[float]`, *optional*, defaults to `self.codebook_image_mean`):
Codebook pixels mean to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`.
codebook_image_std (`float` or `list[float]`, *optional*, defaults to `self.codebook_image_std`):
Codebook pixels standard deviation to normalize the codebook pixels by if `codebook_do_normalize` is
set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name='crop_size')
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
return_image_mask = return_image_mask if return_image_mask is not None else self.return_image_mask
input_size_patches = input_size_patches if input_size_patches is not None else self.input_size_patches
total_mask_patches = total_mask_patches if total_mask_patches is not None else self.total_mask_patches
mask_group_min_patches = mask_group_min_patches if mask_group_min_patches is not None else self.mask_group_min_patches
mask_group_max_patches = mask_group_max_patches if mask_group_max_patches is not None else self.mask_group_max_patches
mask_group_min_aspect_ratio = mask_group_min_aspect_ratio if mask_group_min_aspect_ratio is not None else self.mask_group_min_aspect_ratio
mask_group_max_aspect_ratio = mask_group_max_aspect_ratio if mask_group_max_aspect_ratio is not None else self.mask_group_max_aspect_ratio
return_codebook_pixels = return_codebook_pixels if return_codebook_pixels is not None else self.return_codebook_pixels
codebook_do_resize = codebook_do_resize if codebook_do_resize is not None else self.codebook_do_resize
codebook_size = codebook_size if codebook_size is not None else self.codebook_size
codebook_size = get_size_dict(codebook_size, param_name='codebook_size')
codebook_resample = codebook_resample if codebook_resample is not None else self.codebook_resample
codebook_do_rescale = codebook_do_rescale if codebook_do_rescale is not None else self.codebook_do_rescale
codebook_rescale_factor = codebook_rescale_factor if codebook_rescale_factor is not None else self.codebook_rescale_factor
codebook_do_center_crop = codebook_do_center_crop if codebook_do_center_crop is not None else self.codebook_do_center_crop
codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else self.codebook_crop_size
codebook_crop_size = get_size_dict(codebook_crop_size, param_name='codebook_crop_size')
codebook_do_map_pixels = codebook_do_map_pixels if codebook_do_map_pixels is not None else self.codebook_do_map_pixels
codebook_do_normalize = codebook_do_normalize if codebook_do_normalize is not None else self.codebook_do_normalize
codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else self.codebook_image_mean
codebook_image_std = codebook_image_std if codebook_image_std is not None else self.codebook_image_std
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
processed_images = [self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_map_pixels=False, data_format=data_format, input_data_format=input_data_format) for img in images]
data = {'pixel_values': processed_images}
if return_codebook_pixels:
codebook_images = [self._preprocess_image(image=img, do_resize=codebook_do_resize, size=codebook_size, resample=codebook_resample, do_center_crop=codebook_do_center_crop, crop_size=codebook_crop_size, do_rescale=codebook_do_rescale, rescale_factor=codebook_rescale_factor, do_normalize=codebook_do_normalize, image_mean=codebook_image_mean, image_std=codebook_image_std, do_map_pixels=codebook_do_map_pixels, data_format=data_format, input_data_format=input_data_format) for img in images]
data['codebook_pixel_values'] = codebook_images
if return_image_mask:
mask_generator = self.masking_generator(input_size_patches=input_size_patches, total_mask_patches=total_mask_patches, mask_group_min_patches=mask_group_min_patches, mask_group_max_patches=mask_group_max_patches, mask_group_min_aspect_ratio=mask_group_min_aspect_ratio, mask_group_max_aspect_ratio=mask_group_max_aspect_ratio)
masks = [mask_generator() for _ in images]
data['bool_masked_pos'] = masks
return BatchFeature(data=data, tensor_type=return_tensors)
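
# Illustrative usage sketch. Assumptions: network access and the public "facebook/flava-full"
# checkpoint; the random array is a stand-in for a real HWC image, so exact values are irrelevant.
if __name__ == "__main__":
    processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")
    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    batch = processor(image, return_image_mask=True, return_codebook_pixels=True, return_tensors="np")
    print(sorted(batch.keys()))                  # ['bool_masked_pos', 'codebook_pixel_values', 'pixel_values']
    print(batch["pixel_values"].shape)           # (1, 3, 224, 224) with the default size
    print(batch["codebook_pixel_values"].shape)  # (1, 3, 112, 112) with the default codebook_size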
|
@requires(backends=('vision',))
class FlavaImageProcessor(BaseImageProcessor):
'''
Constructs a Flava image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in `preprocess`.
size (`dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the image after resizing. Can be overridden by the `size` parameter in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in
`preprocess`.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the images. Can be overridden by the `do_center_crop` parameter in `preprocess`.
crop_size (`dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
Size of image after the center crop `(crop_size["height"], crop_size["width"])`. Can be overridden by the
`crop_size` parameter in `preprocess`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in `preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in
`preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in `preprocess`.
        image_mean (`float` or `list[float]`, *optional*, defaults to `FLAVA_IMAGE_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `FLAVA_IMAGE_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
return_image_mask (`bool`, *optional*, defaults to `False`):
Whether to return the image mask. Can be overridden by the `return_image_mask` parameter in `preprocess`.
input_size_patches (`int`, *optional*, defaults to 14):
Number of patches in the image in height and width direction. 14x14 = 196 total patches. Can be overridden
by the `input_size_patches` parameter in `preprocess`.
total_mask_patches (`int`, *optional*, defaults to 75):
Total number of patches that should be masked. Can be overridden by the `total_mask_patches` parameter in
`preprocess`.
mask_group_min_patches (`int`, *optional*, defaults to 16):
Minimum number of patches that should be masked. Can be overridden by the `mask_group_min_patches`
parameter in `preprocess`.
mask_group_max_patches (`int`, *optional*):
Maximum number of patches that should be masked. Can be overridden by the `mask_group_max_patches`
parameter in `preprocess`.
mask_group_min_aspect_ratio (`float`, *optional*, defaults to 0.3):
Minimum aspect ratio of the mask window. Can be overridden by the `mask_group_min_aspect_ratio` parameter
in `preprocess`.
mask_group_max_aspect_ratio (`float`, *optional*):
Maximum aspect ratio of the mask window. Can be overridden by the `mask_group_max_aspect_ratio` parameter
in `preprocess`.
codebook_do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the input for codebook to a certain `codebook_size`. Can be overridden by the
            `codebook_do_resize` parameter in `preprocess`.
        codebook_size (`dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`):
Resize the input for codebook to the given size. Can be overridden by the `codebook_size` parameter in
`preprocess`.
codebook_resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
Resampling filter to use if resizing the codebook image. Can be overridden by the `codebook_resample`
parameter in `preprocess`.
codebook_do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to crop the input for codebook at the center. If the input size is smaller than
`codebook_crop_size` along any edge, the image is padded with 0's and then center cropped. Can be
overridden by the `codebook_do_center_crop` parameter in `preprocess`.
        codebook_crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`):
Desired output size for codebook input when applying center-cropping. Can be overridden by the
`codebook_crop_size` parameter in `preprocess`.
codebook_do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input for codebook by the specified scale `codebook_rescale_factor`. Can be
overridden by the `codebook_do_rescale` parameter in `preprocess`.
codebook_rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Defines the scale factor to use if rescaling the codebook image. Can be overridden by the
`codebook_rescale_factor` parameter in `preprocess`.
codebook_do_map_pixels (`bool`, *optional*, defaults to `True`):
Whether to map the pixel values of the codebook input to (1 - 2e)x + e. Can be overridden by the
`codebook_do_map_pixels` parameter in `preprocess`.
codebook_do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input for codebook with `codebook_image_mean` and `codebook_image_std`. Can
be overridden by the `codebook_do_normalize` parameter in `preprocess`.
codebook_image_mean (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0, 0, 0]`):
The sequence of means for each channel, to be used when normalizing images for codebook. Can be overridden
by the `codebook_image_mean` parameter in `preprocess`.
codebook_image_std (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0.5, 0.5, 0.5]`):
The sequence of standard deviations for each channel, to be used when normalizing images for codebook. Can
be overridden by the `codebook_image_std` parameter in `preprocess`.
'''
    def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, Iterable[float]]]=None, image_std: Optional[Union[float, Iterable[float]]]=None, return_image_mask: bool=False, input_size_patches: int=14, total_mask_patches: int=75, mask_group_min_patches: int=16, mask_group_max_patches: Optional[int]=None, mask_group_min_aspect_ratio: float=0.3, mask_group_max_aspect_ratio: Optional[float]=None, return_codebook_pixels: bool=False, codebook_do_resize: bool=True, codebook_size: Optional[dict[str, int]]=None, codebook_resample: int=PILImageResampling.LANCZOS, codebook_do_center_crop: bool=True, codebook_crop_size: Optional[dict[str, int]]=None, codebook_do_rescale: bool=True, codebook_rescale_factor: Union[int, float]=1 / 255, codebook_do_map_pixels: bool=True, codebook_do_normalize: bool=True, codebook_image_mean: Optional[Union[float, Iterable[float]]]=None, codebook_image_std: Optional[Union[float, Iterable[float]]]=None, **kwargs) -> None:
pass
@classmethod
def from_dict(cls, image_processor_dict: dict[str, Any], **kwargs):
'''
Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
created using from_dict and kwargs e.g. `FlavaImageProcessor.from_pretrained(checkpoint, codebook_size=600)`
'''
pass
@lru_cache
def masking_generator(self, input_size_patches, total_mask_patches, mask_group_min_patches, mask_group_max_patches, mask_group_min_aspect_ratio, mask_group_max_aspect_ratio) -> FlavaMaskingGenerator:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
'''
pass
def map_pixels(self, image: np.ndarray) -> np.ndarray:
pass
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_map_pixels: Optional[bool]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[ChannelDimension]=None) -> np.ndarray:
'''Preprocesses a single image.'''
pass
@filter_out_non_signature_kwargs()
    def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_image_mask: Optional[bool]=None, input_size_patches: Optional[int]=None, total_mask_patches: Optional[int]=None, mask_group_min_patches: Optional[int]=None, mask_group_max_patches: Optional[int]=None, mask_group_min_aspect_ratio: Optional[float]=None, mask_group_max_aspect_ratio: Optional[float]=None, return_codebook_pixels: Optional[bool]=None, codebook_do_resize: Optional[bool]=None, codebook_size: Optional[dict[str, int]]=None, codebook_resample: Optional[int]=None, codebook_do_center_crop: Optional[bool]=None, codebook_crop_size: Optional[dict[str, int]]=None, codebook_do_rescale: Optional[bool]=None, codebook_rescale_factor: Optional[float]=None, codebook_do_map_pixels: Optional[bool]=None, codebook_do_normalize: Optional[bool]=None, codebook_image_mean: Optional[Iterable[float]]=None, codebook_image_std: Optional[Iterable[float]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image.
resample (`int`, *optional*, defaults to `self.resample`):
            Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`.
            Only has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the image values to the [0, 1] range.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_image_mask (`bool`, *optional*, defaults to `self.return_image_mask`):
Whether to return the image mask.
        input_size_patches (`int`, *optional*, defaults to `self.input_size_patches`):
            Number of patches in the image in the height and width direction.
        total_mask_patches (`int`, *optional*, defaults to `self.total_mask_patches`):
            Total number of patches that should be masked.
        mask_group_min_patches (`int`, *optional*, defaults to `self.mask_group_min_patches`):
            Minimum number of patches that should be masked per mask group.
        mask_group_max_patches (`int`, *optional*, defaults to `self.mask_group_max_patches`):
            Maximum number of patches that should be masked per mask group.
        mask_group_min_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_min_aspect_ratio`):
            Minimum aspect ratio of the mask window.
        mask_group_max_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_max_aspect_ratio`):
            Maximum aspect ratio of the mask window.
return_codebook_pixels (`bool`, *optional*, defaults to `self.return_codebook_pixels`):
Whether to return the codebook pixels.
codebook_do_resize (`bool`, *optional*, defaults to `self.codebook_do_resize`):
Whether to resize the codebook pixels.
codebook_size (`dict[str, int]`, *optional*, defaults to `self.codebook_size`):
Size of the codebook pixels.
codebook_resample (`int`, *optional*, defaults to `self.codebook_resample`):
            Resampling filter to use if resizing the codebook pixels. This can be one of the enum
            `PILImageResampling`. Only has an effect if `codebook_do_resize` is set to `True`.
codebook_do_center_crop (`bool`, *optional*, defaults to `self.codebook_do_center_crop`):
Whether to center crop the codebook pixels.
codebook_crop_size (`dict[str, int]`, *optional*, defaults to `self.codebook_crop_size`):
Size of the center crop of the codebook pixels. Only has an effect if `codebook_do_center_crop` is set
to `True`.
        codebook_do_rescale (`bool`, *optional*, defaults to `self.codebook_do_rescale`):
            Whether to rescale the codebook pixel values to the [0, 1] range.
        codebook_rescale_factor (`float`, *optional*, defaults to `self.codebook_rescale_factor`):
            Rescale factor to rescale the codebook pixels by if `codebook_do_rescale` is set to `True`.
        codebook_do_map_pixels (`bool`, *optional*, defaults to `self.codebook_do_map_pixels`):
            Whether to apply the logit-Laplace mapping to the codebook pixel values.
codebook_do_normalize (`bool`, *optional*, defaults to `self.codebook_do_normalize`):
Whether to normalize the codebook pixels.
codebook_image_mean (`float` or `list[float]`, *optional*, defaults to `self.codebook_image_mean`):
Codebook pixels mean to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`.
codebook_image_std (`float` or `list[float]`, *optional*, defaults to `self.codebook_image_std`):
Codebook pixels standard deviation to normalize the codebook pixels by if `codebook_do_normalize` is
set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| 12
| 5
| 66
| 4
| 46
| 17
| 8
| 0.64
| 1
| 10
| 3
| 1
| 6
| 29
| 7
| 27
| 565
| 34
| 324
| 146
| 214
| 207
| 126
| 44
| 118
| 33
| 3
| 1
| 58
|
2,539
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/image_processing_flava.py
|
transformers.models.flava.image_processing_flava.FlavaMaskingGenerator
|
import random
import numpy as np
from typing import Any, Optional, Union
import math
class FlavaMaskingGenerator:
def __init__(self, input_size: Union[int, tuple[int, int]]=14, total_mask_patches: int=75, mask_group_max_patches: Optional[int]=None, mask_group_min_patches: int=16, mask_group_min_aspect_ratio: Optional[float]=0.3, mask_group_max_aspect_ratio: Optional[float]=None):
if not isinstance(input_size, tuple):
input_size = (input_size,) * 2
self.height, self.width = input_size
self.num_patches = self.height * self.width
self.total_mask_patches = total_mask_patches
self.mask_group_min_patches = mask_group_min_patches
self.mask_group_max_patches = total_mask_patches if mask_group_max_patches is None else mask_group_max_patches
mask_group_max_aspect_ratio = mask_group_max_aspect_ratio or 1 / mask_group_min_aspect_ratio
self.log_aspect_ratio = (math.log(mask_group_min_aspect_ratio), math.log(mask_group_max_aspect_ratio))
def __repr__(self):
repr_str = 'MaskingGenerator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)' % (self.height, self.width, self.mask_group_min_patches, self.mask_group_max_patches, self.total_mask_patches, self.log_aspect_ratio[0], self.log_aspect_ratio[1])
return repr_str
def get_shape(self):
return (self.height, self.width)
def _mask(self, mask, max_mask_patches):
delta = 0
for _attempt in range(10):
target_area = random.uniform(self.mask_group_min_patches, max_mask_patches)
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
height = int(round(math.sqrt(target_area * aspect_ratio)))
width = int(round(math.sqrt(target_area / aspect_ratio)))
if width < self.width and height < self.height:
top = random.randint(0, self.height - height)
left = random.randint(0, self.width - width)
num_masked = mask[top:top + height, left:left + width].sum()
if 0 < height * width - num_masked <= max_mask_patches:
for i in range(top, top + height):
for j in range(left, left + width):
if mask[i, j] == 0:
mask[i, j] = 1
delta += 1
if delta > 0:
break
return delta
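    # Note: `_mask` above is rejection sampling -- up to 10 attempts draw a target patch count and a
    # log-uniform aspect ratio, derive a (height, width) window, and accept it only when the newly
    # covered (previously unmasked) area is positive and no larger than `max_mask_patches`.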
def __call__(self):
mask = np.zeros(shape=self.get_shape(), dtype=int)
mask_count = 0
while mask_count < self.total_mask_patches:
max_mask_patches = self.total_mask_patches - mask_count
max_mask_patches = min(max_mask_patches, self.mask_group_max_patches)
delta = self._mask(mask, max_mask_patches)
if delta == 0:
break
else:
mask_count += delta
return mask
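
# Illustrative sketch: the generator in isolation, with the defaults used above. The mask layout is
# random; seeding `random` / `np.random` would make it reproducible.
gen = FlavaMaskingGenerator(input_size=14, total_mask_patches=75)
mask = gen()
print(mask.shape)       # (14, 14)
print(int(mask.sum()))  # at most 75 masked patches; can fall short if window sampling stalls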
|
class FlavaMaskingGenerator:
def __init__(self, input_size: Union[int, tuple[int, int]]=14, total_mask_patches: int=75, mask_group_max_patches: Optional[int]=None, mask_group_min_patches: int=16, mask_group_min_aspect_ratio: Optional[float]=0.3, mask_group_max_aspect_ratio: Optional[float]=None):
pass
def __repr__(self):
pass
def get_shape(self):
pass
def _mask(self, mask, max_mask_patches):
pass
def __call__(self):
pass
| 6
| 0
| 14
| 1
| 13
| 0
| 3
| 0.02
| 0
| 4
| 0
| 0
| 5
| 7
| 5
| 5
| 76
| 11
| 64
| 36
| 50
| 1
| 47
| 28
| 41
| 8
| 0
| 6
| 16
|
2,540
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaAttention
|
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from torch import nn
from typing import Any, Optional, Union
import torch
class FlavaAttention(nn.Module):
def __init__(self, config: FlavaPossibleConfigs) -> None:
super().__init__()
self.attention = FlavaSelfAttention(config)
self.output = FlavaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads: set[int]) -> None:
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
self_outputs = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
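
# Illustrative sketch of the pruning bookkeeping, calling `find_pruneable_heads_and_indices`
# directly with a hypothetical 12-head, 64-dim-per-head layout and no previously pruned heads.
heads, index = find_pruneable_heads_and_indices({0, 2}, 12, 64, set())
print(heads)          # the heads to prune, here {0, 2}
print(index.numel())  # 640 == (12 - 2) * 64 columns kept by `prune_linear_layer`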
|
class FlavaAttention(nn.Module):
def __init__(self, config: FlavaPossibleConfigs) -> None:
pass
def prune_heads(self, heads: set[int]) -> None:
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
pass
| 4
| 0
| 12
| 1
| 10
| 1
| 1
| 0.09
| 1
| 7
| 2
| 0
| 3
| 3
| 3
| 13
| 40
| 6
| 32
| 17
| 22
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
2,541
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaEncoder
|
from torch import nn
from .configuration_flava import FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from typing import Any, Optional, Union
class FlavaEncoder(nn.Module):
def __init__(self, config: FlavaConfig) -> None:
super().__init__()
self.config = config
self.layer = nn.ModuleList([FlavaLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)
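
# Illustrative sketch of the tuple return convention above: with `return_dict=False`, accumulators
# that stayed `None` (because their output flag was off) are simply dropped from the tuple.
last_hidden = "last_hidden_state"  # stand-in value
print(tuple(v for v in [last_hidden, None, None] if v is not None))  # ('last_hidden_state',)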
|
class FlavaEncoder(nn.Module):
def __init__(self, config: FlavaConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True) -> Union[tuple, BaseModelOutput]:
pass
| 3
| 0
| 24
| 4
| 20
| 0
| 6
| 0
| 1
| 9
| 3
| 0
| 2
| 3
| 2
| 12
| 49
| 8
| 41
| 19
| 30
| 0
| 24
| 11
| 21
| 10
| 1
| 2
| 11
|
2,542
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaForPreTraining
|
from torch import nn
from .configuration_flava import FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig
import torch
from typing import Any, Optional, Union
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
@auto_docstring(custom_intro='\n The FLAVA model for pretraining which outputs losses, embeddings, logits and transformer outputs.\n ')
class FlavaForPreTraining(FlavaPreTrainedModel):
_tied_weights_keys = ['mmm_text_head.decoder.bias', 'mmm_image_head.decoder.bias', 'mlm_head.decoder.bias', 'mim_head.decoder.bias']
def __init__(self, config: FlavaConfig, image_codebook: Optional[nn.Module]=None):
"""
image_codebook ([`nn.Module`]):
If passed, the image codebook will be set to this. Otherwise, it will be initialized using the
image_codebook_config defined in the config first as the first parameter.
"""
super().__init__(config)
self.flava = FlavaModel(config)
self.image_codebook = image_codebook
if self.image_codebook is None and config.init_codebook:
self.image_codebook = FlavaImageCodebook(config.image_codebook_config)
self.mim_head = FlavaMaskedPredictionHead(config.image_config)
self.mlm_head = FlavaMaskedPredictionHead(config.text_config)
self.itm_head = FlavaITMHead(config)
self.mmm_image_head = FlavaMaskedPredictionHead(config.image_config)
self.mmm_text_head = FlavaMaskedPredictionHead(config.text_config)
self.global_contrastive_head = FlavaGlobalContrastiveHead(config)
self.image_vocab_size = config.image_config.vocab_size
self.text_vocab_size = config.text_config.vocab_size
self.mlm_weight = config.mlm_weight
self.mim_weight = config.mim_weight
self.global_contrastive_weight = config.global_contrastive_weight
self.ce_ignore_index = config.ce_ignore_index
self.itm_weight = config.itm_weight
self.mmm_image_weight = config.mmm_image_weight
self.mmm_text_weight = config.mmm_text_weight
self.skip_unmasked_multimodal_encoder = config.skip_unmasked_multimodal_encoder
self.post_init()
def _resize_to_2d(self, x: torch.Tensor):
if x.dim() > 2:
x = x.view(x.size(0), -1)
return x
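    # Note: `_resize_to_2d` flattens trailing dimensions, e.g. a (batch, height, width) boolean
    # patch mask becomes (batch, height * width) so it can index token sequences directly.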
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, input_ids_masked: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, codebook_pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, skip_unmasked_multimodal_encoder: Optional[bool]=None, mlm_labels: Optional[torch.Tensor]=None, mim_labels: Optional[torch.Tensor]=None, itm_labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: bool=True, return_dict: Optional[bool]=None, return_loss: Optional[bool]=None) -> Union[tuple[torch.Tensor], FlavaForPreTrainingOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, text_seq_len)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
input_ids_masked (`torch.LongTensor` of shape `(batch_size, text_seq_len)`):
Indices of input sequence tokens in the vocabulary. These ones are the masked version of the original task
to be used with MLM. Indices can be obtained using [`AutoTokenizer`] along with
[`DataCollatorForMaskedLanguageModeling`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
codebook_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_image_patches, patch_size, patch_size, 3)`, *optional*):
Pixel values for image patches that are used to compute the image codebook labels for masked image modeling.
token_type_ids (`torch.LongTensor` of shape `(batch_size, text_seq_len)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
image_attention_mask (`torch.FloatTensor` of shape `(batch_size, image_num_patches)`, *optional*):
Mask to avoid performing attention on padding token indices specifically for images. Mask values selected
in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
skip_unmasked_multimodal_encoder (*bool*, *optional*):
Skip any calculations for multimodal encoder for unmasked inputs. FLAVA pretraining doesn't need unmasked
multimodal embeddings or outputs as of now.
mlm_labels (`torch.LongTensor` of shape `(batch_size, text_seq_len)`, *optional*):
Labels for computing the left-to-right language and multimodal masked modeling loss (next word prediction).
Indices should be in `[-100, 0, ..., text_config.vocab_size - 1]` (see `input_ids` docstring). Tokens with
indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0,
..., text_config.vocab_size - 1]`.
mim_labels (`torch.LongTensor` of shape `(batch_size, image_num_patches)`, *optional*):
Labels for computing the image and multimodal masked modeling loss. Indices should be in `[-100, 0, ...,
image_config.vocab_size - 1]`. Tokens with indices set to `-100` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., image_config.vocab_size - 1]`. If not passed, they are
generated automatically using the image codebook assigned to the model. By default, it uses
[`FlavaImageCodebook`]. See [`FlavaImageCodebook`] to understand how to generate mim_labels.
itm_labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*):
Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match.
The pairs with 0 will be skipped for calculation of MMM and global contrastive losses as well.
        return_loss (`bool`, *optional*, defaults to `None`):
Whether to return calculated loss or not.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import FlavaForPreTraining, AutoProcessor
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> model = FlavaForPreTraining.from_pretrained("facebook/flava-full")
>>> processor = AutoProcessor.from_pretrained("facebook/flava-full")
>>> text = ["a photo of a cat"]
>>> inputs = processor(
... images=[image],
... text=text,
... return_masks=True,
... return_codebook_pixels=True,
... padding=True,
... max_length=77,
... return_tensors="pt",
... )
>>> output = model(**inputs)
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
return_loss = return_loss if return_loss is not None else self.config.return_loss
skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder if skip_unmasked_multimodal_encoder is not None else self.skip_unmasked_multimodal_encoder
if input_ids_masked is None and input_ids is not None:
            logger.warning("`input_ids_masked` isn't passed which means MLM loss won't be calculated correctly. Setting it to `input_ids` so that the model can work. Please pass it if this is unintentional. This is usually OKAY if you are doing inference on unmasked text...")
input_ids_masked = input_ids
flava_output = self.flava(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, image_attention_mask=image_attention_mask, skip_multimodal_encoder=skip_unmasked_multimodal_encoder, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
flava_masked_output = self.flava(input_ids=input_ids_masked, pixel_values=pixel_values, attention_mask=attention_mask, token_type_ids=token_type_ids, image_attention_mask=image_attention_mask, bool_masked_pos=bool_masked_pos, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
pos_mask = None
image_embeddings = flava_output.image_embeddings
text_embeddings = flava_output.text_embeddings
image_masked_embeddings = flava_masked_output.image_embeddings
text_masked_embeddings = flava_masked_output.text_embeddings
multimodal_masked_embeddings = flava_masked_output.multimodal_embeddings
total_loss = mim_loss = mlm_loss = mmm_text_loss = mmm_image_loss = gc_loss = itm_loss = None
mim_logits = mlm_logits = mmm_text_logits = mmm_image_logits = None
itm_logits = logits_per_image = logits_per_text = None
if image_masked_embeddings is not None or multimodal_masked_embeddings is not None:
if mim_labels is None and return_loss:
if self.image_codebook is None:
raise RuntimeError('`return_loss` is set to True but the image codebook is not initialized and no `mim_labels` have been passed. Reinstantiate the model with `init_codebook` set to True or pass in your custom `mim_labels`')
if codebook_pixel_values is None:
raise ValueError('`codebook_pixel_values` are required to generate `mim_labels` if loss is expected. Call `AutoProcessor` with `return_codebook_pixels` set to True')
mim_labels = self.image_codebook.get_codebook_indices(codebook_pixel_values)
if self.mim_weight > 0 and image_masked_embeddings is not None and (multimodal_masked_embeddings is None):
sequence_for_image = image_masked_embeddings
if mim_labels is not None:
mim_labels = self._resize_to_2d(mim_labels)
bool_masked_pos = self._resize_to_2d(bool_masked_pos)
mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
sequence_for_image = sequence_for_image[:, -mim_labels.size(1):, :]
masked_tokens = mim_labels.ne(self.ce_ignore_index)
mim_labels_filtered = mim_labels[masked_tokens]
sequence_for_image = sequence_for_image[masked_tokens, :]
mim_logits = self.mim_head(sequence_for_image)
if return_loss:
mim_loss = nn.functional.cross_entropy(mim_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1))
mim_loss *= self.mim_weight
else:
mim_logits = self.mim_head(sequence_for_image)
if self.mlm_weight > 0 and text_masked_embeddings is not None and (multimodal_masked_embeddings is None):
sequence_for_text = text_masked_embeddings
if mlm_labels is not None:
mlm_labels = self._resize_to_2d(mlm_labels)
sequence_for_text = sequence_for_text[:, -mlm_labels.size(1):, :]
masked_tokens = mlm_labels.ne(self.ce_ignore_index)
mlm_labels_filtered = mlm_labels[masked_tokens]
sequence_for_text = sequence_for_text[masked_tokens, :]
mlm_logits = self.mlm_head(sequence_for_text)
if return_loss:
mlm_loss = nn.functional.cross_entropy(mlm_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1))
mlm_loss *= self.mlm_weight
else:
mlm_logits = self.mlm_head(sequence_for_text)
if self.itm_weight > 0 and multimodal_masked_embeddings is not None:
itm_logits = self.itm_head(multimodal_masked_embeddings)
if itm_labels is not None:
pos_pairs = itm_labels.ne(0)
pos_mask = torch.where(pos_pairs.any(), pos_pairs, pos_pairs.new([True]))
if return_loss:
itm_loss = nn.functional.cross_entropy(itm_logits, itm_labels)
itm_loss *= self.itm_weight
if multimodal_masked_embeddings is not None:
multimodal_masked_embeddings = multimodal_masked_embeddings[pos_mask]
if mlm_labels is not None:
mlm_labels = mlm_labels[pos_mask]
if mim_labels is not None:
mim_labels = mim_labels[pos_mask]
bool_masked_pos = bool_masked_pos[pos_mask]
if multimodal_masked_embeddings is not None and self.mmm_image_weight > 0:
sequence_for_image = multimodal_masked_embeddings
end_index = image_masked_embeddings.size(1) - 1
sequence_for_image = sequence_for_image[:, 2:2 + end_index, :]
if mim_labels is not None:
mim_labels = self._resize_to_2d(mim_labels)
bool_masked_pos = self._resize_to_2d(bool_masked_pos)
mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
masked_tokens = mim_labels.ne(self.ce_ignore_index)
mim_labels_filtered = mim_labels[masked_tokens]
sequence_for_image = sequence_for_image[masked_tokens, :]
mmm_image_logits = self.mmm_image_head(sequence_for_image)
if return_loss:
mmm_image_loss = nn.functional.cross_entropy(mmm_image_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1))
mmm_image_loss *= self.mmm_image_weight
else:
mmm_image_logits = self.mmm_image_head(sequence_for_image)
if multimodal_masked_embeddings is not None and self.mmm_text_weight > 0:
sequence_for_text = multimodal_masked_embeddings
sequence_for_text = sequence_for_text[:, -text_masked_embeddings.size(1):, :]
if mlm_labels is not None:
mlm_labels = self._resize_to_2d(mlm_labels)
masked_tokens = mlm_labels.ne(self.ce_ignore_index)
mlm_labels_filtered = mlm_labels[masked_tokens]
sequence_for_text = sequence_for_text[masked_tokens, :]
mmm_text_logits = self.mmm_text_head(sequence_for_text)
if return_loss:
mmm_text_loss = nn.functional.cross_entropy(mmm_text_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1))
mmm_text_loss *= self.mmm_text_weight
else:
mmm_text_logits = self.mmm_text_head(sequence_for_text)
if image_embeddings is not None and text_embeddings is not None and (self.global_contrastive_weight > 0):
text_embedding = self.flava.text_projection(text_embeddings[:, 0, :])
text_embedding = nn.functional.normalize(text_embedding, dim=-1)
image_embedding = self.flava.image_projection(image_embeddings[:, 0, :])
image_embedding = nn.functional.normalize(image_embedding, dim=-1)
self.flava.logit_scale.data.clamp_(LOGIT_SCALE_CLAMP_MIN, LOGIT_SCALE_CLAMP_MAX)
logits_per_image, logits_per_text, gc_labels = self.global_contrastive_head(image_embedding, text_embedding, self.flava.logit_scale)
if pos_mask is not None:
logits_per_image = logits_per_image[pos_mask]
logits_per_text = logits_per_text[pos_mask]
gc_labels = gc_labels[pos_mask]
if return_loss:
gc_loss_image = nn.functional.cross_entropy(logits_per_image, gc_labels)
gc_loss_text = nn.functional.cross_entropy(logits_per_text, gc_labels)
gc_loss = (gc_loss_image + gc_loss_text) / 2
gc_loss *= self.global_contrastive_weight
flava_losses = FlavaLosses(mim=mim_loss, mlm=mlm_loss, itm=itm_loss, global_contrastive=gc_loss, mmm_image=mmm_image_loss, mmm_text=mmm_text_loss)
if return_loss and (not flava_losses.all_none()):
total_loss = sum((loss if loss is not None else 0 for loss in flava_losses.values()))
if not return_dict:
output = (image_embeddings, flava_output.image_output.to_tuple() if flava_output.image_output is not None else None, text_embeddings, flava_output.text_output.to_tuple() if flava_output.text_output is not None else None, flava_output.multimodal_embeddings, flava_output.multimodal_output.to_tuple() if flava_output.multimodal_output is not None else None, image_masked_embeddings, flava_masked_output.image_output.to_tuple() if flava_masked_output.image_output is not None else None, text_masked_embeddings, flava_masked_output.text_output.to_tuple() if flava_masked_output.text_output is not None else None, multimodal_masked_embeddings, flava_masked_output.multimodal_output.to_tuple() if flava_masked_output.multimodal_output is not None else None, mim_logits, mlm_logits, itm_logits, logits_per_image, logits_per_text, mmm_image_logits, mmm_text_logits)
if return_loss and (not flava_losses.all_none()):
output = (total_loss, flava_losses) + output
return tuple((x for x in output if x is not None))
return FlavaForPreTrainingOutput(loss=total_loss, loss_info=flava_losses, image_embeddings=image_embeddings, image_output=flava_output.image_output, text_embeddings=text_embeddings, text_output=flava_output.text_output, multimodal_embeddings=flava_output.multimodal_embeddings, multimodal_output=flava_output.multimodal_output, image_masked_embeddings=image_masked_embeddings, image_masked_output=flava_masked_output.image_output, text_masked_embeddings=text_masked_embeddings, text_masked_output=flava_masked_output.text_output, multimodal_masked_embeddings=multimodal_masked_embeddings, multimodal_masked_output=flava_masked_output.multimodal_output, mim_logits=mim_logits, mlm_logits=mlm_logits, itm_logits=itm_logits, contrastive_logits_per_image=logits_per_image, contrastive_logits_per_text=logits_per_text, mmm_image_logits=mmm_image_logits, mmm_text_logits=mmm_text_logits)
|
@auto_docstring(custom_intro='\n The FLAVA model for pretraining which outputs losses, embeddings, logits and transformer outputs.\n ')
class FlavaForPreTraining(FlavaPreTrainedModel):
def __init__(self, config: FlavaConfig, image_codebook: Optional[nn.Module]=None):
'''
image_codebook ([`nn.Module`]):
If passed, the image codebook will be set to this. Otherwise, it will be initialized using the
image_codebook_config defined in the config first as the first parameter.
'''
pass
def _resize_to_2d(self, x: torch.Tensor):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, input_ids_masked: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, codebook_pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, skip_unmasked_multimodal_encoder: Optional[bool]=None, mlm_labels: Optional[torch.Tensor]=None, mim_labels: Optional[torch.Tensor]=None, itm_labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: bool=True, return_dict: Optional[bool]=None, return_loss: Optional[bool]=None) -> Union[tuple[torch.Tensor], FlavaForPreTrainingOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, text_seq_len)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
input_ids_masked (`torch.LongTensor` of shape `(batch_size, text_seq_len)`):
Indices of input sequence tokens in the vocabulary. These ones are the masked version of the original task
to be used with MLM. Indices can be obtained using [`AutoTokenizer`] along with
[`DataCollatorForMaskedLanguageModeling`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
codebook_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_image_patches, patch_size, patch_size, 3)`, *optional*):
Pixel values for image patches that are used to compute the image codebook labels for masked image modeling.
token_type_ids (`torch.LongTensor` of shape `(batch_size, text_seq_len)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
image_attention_mask (`torch.FloatTensor` of shape `(batch_size, image_num_patches)`, *optional*):
Mask to avoid performing attention on padding token indices specifically for images. Mask values selected
in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
skip_unmasked_multimodal_encoder (*bool*, *optional*):
Skip any calculations for multimodal encoder for unmasked inputs. FLAVA pretraining doesn't need unmasked
multimodal embeddings or outputs as of now.
mlm_labels (`torch.LongTensor` of shape `(batch_size, text_seq_len)`, *optional*):
Labels for computing the left-to-right language and multimodal masked modeling loss (next word prediction).
Indices should be in `[-100, 0, ..., text_config.vocab_size - 1]` (see `input_ids` docstring). Tokens with
indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0,
..., text_config.vocab_size - 1]`.
mim_labels (`torch.LongTensor` of shape `(batch_size, image_num_patches)`, *optional*):
Labels for computing the image and multimodal masked modeling loss. Indices should be in `[-100, 0, ...,
image_config.vocab_size - 1]`. Tokens with indices set to `-100` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., image_config.vocab_size - 1]`. If not passed, they are
generated automatically using the image codebook assigned to the model. By default, it uses
[`FlavaImageCodebook`]. See [`FlavaImageCodebook`] to understand how to generate mim_labels.
itm_labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*):
Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match.
The pairs with 0 will be skipped for calculation of MMM and global contrastive losses as well.
return_loss (`bool`, *optional*, defaults to `None`):
Whether to return calculated loss or not.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import FlavaForPreTraining, AutoProcessor
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> model = FlavaForPreTraining.from_pretrained("facebook/flava-full")
>>> processor = AutoProcessor.from_pretrained("facebook/flava-full")
>>> text = ["a photo of a cat"]
>>> inputs = processor(
... images=[image],
... text=text,
... return_masks=True,
... return_codebook_pixels=True,
... padding=True,
... max_length=77,
... return_tensors="pt",
... )
>>> output = model(**inputs)
```
'''
pass
| 6
| 2
| 118
| 15
| 91
| 13
| 15
| 0.14
| 1
| 14
| 8
| 0
| 3
| 18
| 3
| 4
| 370
| 47
| 283
| 70
| 256
| 40
| 151
| 48
| 147
| 40
| 2
| 3
| 44
|
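The MIM, MLM and MMM branches in the `FlavaForPreTraining.forward` record above all share one label-filtering pattern: unmasked positions are set to the cross-entropy ignore index, only the masked positions are gathered, and the resulting loss is scaled by the task weight. Below is a minimal editorial sketch of that pattern with toy shapes and a plain `nn.Linear` standing in for the real prediction head; none of the values come from the record itself.
```python
import torch
from torch import nn

ce_ignore_index = -100        # same ignore index the model uses for cross_entropy
mlm_weight = 1.0              # per-task loss weight, as in self.mlm_weight
vocab_size = 1000             # toy vocabulary for the sketch

hidden_states = torch.randn(2, 8, 768)               # stand-in for text_masked_embeddings
mlm_labels = torch.randint(0, vocab_size, (2, 8))     # toy labels
bool_masked_pos = torch.zeros(2, 8, dtype=torch.bool)
bool_masked_pos[:, 2:4] = True                        # pretend only these tokens were masked

labels = mlm_labels.clone()
labels[~bool_masked_pos] = ce_ignore_index            # ignore every unmasked position
masked_tokens = labels.ne(ce_ignore_index)

head = nn.Linear(768, vocab_size)                     # stand-in for the MLM/MIM prediction head
logits = head(hidden_states[masked_tokens, :])        # only masked positions are scored
loss = nn.functional.cross_entropy(logits, labels[masked_tokens]) * mlm_weight
```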
2,543
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaForPreTrainingOutput
|
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from typing import Any, Optional, Union
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro="\n Output from FlavaForPreTraining containing embeddings, and outputs from individual encoders.\n\n Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a\n transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and\n `text_projection` layers on `image_embeddings` and `text_embeddings` respectively.\n ")
class FlavaForPreTrainingOutput(ModelOutput):
"""
loss (`torch.FloatTensor`, *optional*, returned when `return_loss` is True):
Total loss calculated for this model.
loss_info (`FlavaLosses`):
Detailed info for FLAVA Pretraining losses. Check `FlavaLosses` class description for the information on
the keys.
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
The image embeddings which are basically the pooled output of [`FlavaImageModel`].
image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
The output of the [`FlavaImageModel`].
text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
The text embeddings which are basically the pooled output of [`FlavaTextModel`].
text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
The output of the [`FlavaTextModel`].
multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`].
multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
The output of the [`FlavaMultimodalModel`].
image_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
The image embeddings which are basically the pooled output of [`FlavaImageModel`]. Uses `bool_masked_pos`
to create masked images.
image_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
The output of the [`FlavaImageModel`]. Uses `bool_masked_pos` to create masked images.
text_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids_masked` are present):
The text embeddings which are basically the pooled output of [`FlavaTextModel`].
text_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` are present):
The output of the [`FlavaTextModel`].
multimodal_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present):
The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`].
multimodal_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` and `pixel_values` are present):
The output of the [`FlavaMultimodalModel`].
mim_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` are present and `input_ids_masked` are not):
The logits for MIM unimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened output is
returned when `bool_masked_pos` has some of the patches masked.
mlm_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `input_ids_masked` are present and `pixel_values` are not):
The logits for MLM unimodal loss. The flattened output is returned when `input_ids_masked` has some of
the tokens masked.
itm_logits (`torch.FloatTensor` of shape `(batch_size, 2)`, *optional*, returned when `input_ids_masked` and `pixel_values` are present):
The logits for ITM loss. Note that ITM loss is calculated on masked pairs in FLAVA.
contrastive_logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeddings` and `text_embeddings` but passed through FLAVA's
`image_projection` and `text_projection` layers respectively. This represents the image-text similarity
scores. This is calculated on unmasked images and texts.
contrastive_logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeddings` and `image_embeddings` but passed through FLAVA's
`text_projection` and `image_projection` layers respectively. This is calculated on unmasked images and
texts.
mmm_image_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
The logits for MMM image multimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened
output is returned when `bool_masked_pos` has some of the patches masked.
mmm_text_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
The logits for MMM text multimodal loss. The flattened output is returned when `input_ids_masked` has
some of the tokens masked.
"""
loss: Optional[torch.FloatTensor] = None
loss_info: FlavaLosses = None
image_embeddings: Optional[torch.FloatTensor] = None
image_output: Optional[BaseModelOutputWithPooling] = None
text_embeddings: Optional[torch.FloatTensor] = None
text_output: Optional[BaseModelOutputWithPooling] = None
multimodal_embeddings: Optional[torch.FloatTensor] = None
multimodal_output: Optional[BaseModelOutputWithPooling] = None
image_masked_embeddings: Optional[torch.FloatTensor] = None
image_masked_output: Optional[BaseModelOutputWithPooling] = None
text_masked_embeddings: Optional[torch.FloatTensor] = None
text_masked_output: Optional[BaseModelOutputWithPooling] = None
multimodal_masked_embeddings: Optional[torch.FloatTensor] = None
multimodal_masked_output: Optional[BaseModelOutputWithPooling] = None
mim_logits: Optional[torch.FloatTensor] = None
mlm_logits: Optional[torch.FloatTensor] = None
itm_logits: Optional[torch.FloatTensor] = None
contrastive_logits_per_image: Optional[torch.FloatTensor] = None
contrastive_logits_per_text: Optional[torch.FloatTensor] = None
mmm_image_logits: Optional[torch.FloatTensor] = None
mmm_text_logits: Optional[torch.FloatTensor] = None
def to_tuple(self) -> tuple[Any]:
transformer_outputs = ['text_output', 'image_output', 'multimodal_output', 'text_masked_output', 'image_masked_output', 'multimodal_masked_output']
return tuple((self[k] if k not in transformer_outputs else getattr(self, k).to_tuple() for k in self.keys()))
|
@dataclass
@auto_docstring(custom_intro="\n Output from FlavaForPreTraining containing embeddings, and outputs from individual encoders.\n\n Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a\n transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and\n `text_projection` layers on `image_embeddings` and `text_embeddings` respectively.\n ")
class FlavaForPreTrainingOutput(ModelOutput):
'''
loss (`torch.FloatTensor`, *optional*, returned when `return_loss` is True):
Total loss calculated for this model.
loss_info (`FlavaLosses`):
Detailed info for FLAVA Pretraining losses. Check `FlavaLosses` class description for the information on
the keys.
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
The image embeddings which are basically the pooled output of [`FlavaImageModel`].
image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
The output of the [`FlavaImageModel`].
text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
The text embeddings which are basically the pooled output of [`FlavaTextModel`].
text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
The output of the [`FlavaTextModel`].
multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`].
multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
The output of the [`FlavaMultimodalModel`].
image_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
The image embeddings which are basically the pooled output of [`FlavaImageModel`]. Uses `bool_masked_pos`
to create masked images.
image_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
The output of the [`FlavaImageModel`]. Uses `bool_masked_pos` to create masked images.
text_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids_masked` are present):
The text embeddings which are basically the pooled output of [`FlavaTextModel`].
text_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` are present):
The output of the [`FlavaTextModel`].
multimodal_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present):
The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`].
multimodal_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` and `pixel_values` are present):
The output of the [`FlavaMultimodalModel`].
mim_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` are present and `input_ids_masked` are not):
The logits for MIM unimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened output is
returned when `bool_masked_pos` has some of the patches masked.
mlm_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `input_ids_masked` are present and `pixel_values` are not):
The logits for MLM unimodal loss. The flattened output is returned when `input_ids_masked` has some of
the tokens masked.
itm_logits (`torch.FloatTensor` of shape `(batch_size, 2)`, *optional*, returned when `input_ids_masked` and `pixel_values` are present):
The logits for ITM loss. Note that ITM loss is calculated on masked pairs in FLAVA.
contrastive_logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeddings` and `text_embeddings` but passed through FLAVA's
`image_projection` and `text_projection` layers respectively. This represents the image-text similarity
scores. This is calculated on unmasked images and texts.
contrastive_logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeddings` and `image_embeddings` but passed through FLAVA's
`text_projection` and `image_projection` layers respectively. This is calculated on unmasked images and
texts.
mmm_image_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
The logits for MMM image multimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened
output is returned when `bool_masked_pos` has some of the patches masked.
mmm_text_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
The logits for MMM text multimodal loss. The flattened output is returned when `input_ids_masked` has
some of the tokens masked.
'''
def to_tuple(self) -> tuple[Any]:
pass
| 4
| 1
| 10
| 0
| 10
| 0
| 2
| 1.84
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 1
| 97
| 6
| 32
| 24
| 30
| 59
| 25
| 24
| 23
| 2
| 1
| 0
| 2
|
2,544
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaGlobalContrastiveHead
|
from torch import nn
import torch
class FlavaGlobalContrastiveHead(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.global_backprop_contrastive = config.global_backprop_contrastive
def forward(self, image_embeddings, text_embeddings, logit_scale):
temperature = torch.exp(logit_scale)
if not torch.distributed.is_available() or not torch.distributed.is_initialized():
labels = torch.arange(image_embeddings.size(0), device=image_embeddings.device)
image_embeddings_all = [image_embeddings]
text_embeddings_all = [text_embeddings]
else:
local_batch_size = image_embeddings.size(0)
world_size = torch.distributed.get_world_size()
if self.global_backprop_contrastive:
image_embeddings_all = torch.distributed.nn.functional.all_gather(image_embeddings)
text_embeddings_all = torch.distributed.nn.functional.all_gather(text_embeddings)
else:
image_embeddings_all = [torch.zeros_like(image_embeddings) for _ in range(world_size)]
text_embeddings_all = [torch.zeros_like(text_embeddings) for _ in range(world_size)]
torch.distributed.all_gather(image_embeddings_all, image_embeddings)
torch.distributed.all_gather(text_embeddings_all, text_embeddings)
labels = local_batch_size * torch.distributed.get_rank() + torch.arange(local_batch_size, device=image_embeddings.device)
image_embeddings_all = torch.cat(image_embeddings_all)
text_embeddings_all = torch.cat(text_embeddings_all)
logits_per_image = torch.matmul(image_embeddings, text_embeddings_all.transpose(0, 1)) * temperature
logits_per_text = torch.matmul(text_embeddings, image_embeddings_all.transpose(0, 1)) * temperature
return (logits_per_image, logits_per_text, labels)
|
class FlavaGlobalContrastiveHead(nn.Module):
def __init__(self, config):
pass
def forward(self, image_embeddings, text_embeddings, logit_scale):
pass
| 3
| 0
| 18
| 3
| 15
| 1
| 2
| 0.07
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 38
| 6
| 30
| 14
| 27
| 2
| 26
| 13
| 23
| 3
| 1
| 2
| 4
|
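A single-process editorial sketch of the contrastive scoring in `FlavaGlobalContrastiveHead` above (the branch taken when `torch.distributed` is not initialized); the batch size, projection dimension and logit-scale value below are illustrative, not taken from the record.
```python
import torch
from torch import nn

batch, dim = 4, 256
image_embeddings = nn.functional.normalize(torch.randn(batch, dim), dim=-1)
text_embeddings = nn.functional.normalize(torch.randn(batch, dim), dim=-1)
logit_scale = torch.tensor(2.6592)                       # learned log-temperature parameter

temperature = torch.exp(logit_scale)
logits_per_image = image_embeddings @ text_embeddings.t() * temperature   # (batch, batch)
logits_per_text = text_embeddings @ image_embeddings.t() * temperature
labels = torch.arange(batch)                             # the i-th image matches the i-th text

# the caller (FlavaForPreTraining) averages the two cross-entropies into the global contrastive loss
loss = (nn.functional.cross_entropy(logits_per_image, labels)
        + nn.functional.cross_entropy(logits_per_text, labels)) / 2
```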
2,545
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaITMHead
|
from torch import nn
class FlavaITMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pooler = FlavaPooler(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, x):
x = self.pooler(x)
x = self.seq_relationship(x)
return x
|
class FlavaITMHead(nn.Module):
def __init__(self, config):
pass
def forward(self, x):
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 1
| 0
| 2
| 3
| 2
| 12
| 11
| 1
| 10
| 6
| 7
| 0
| 10
| 6
| 7
| 1
| 1
| 0
| 2
|
2,546
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaImageCodebook
|
from torch import nn
from .configuration_flava import FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig
import torch
from typing import Any, Optional, Union
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
from collections import OrderedDict
@auto_docstring(custom_intro="\n The FLAVA's image codebook model inspired from DALL-E's original encoder. Outputs raw hidden states and can be used\n to generate image tokens for an image based on DALL-E's vocab. Used to generate labels for MIM. Use\n `get_codebook_indices` to get image tokens for an image.\n ")
class FlavaImageCodebook(FlavaPreTrainedModel):
base_model_prefix = ''
config: FlavaImageCodebookConfig
main_input_name = 'pixel_values'
supports_gradient_checkpointing = False
def __init__(self, config: FlavaImageCodebookConfig, **kwargs: Any):
super().__init__(config)
self.config = config
self.num_groups = config.num_groups
self.input_channels = config.input_channels
self.num_blocks_per_group = config.num_blocks_per_group
self.hidden_size = config.hidden_size
self.vocab_size = config.vocab_size
num_layers = self.num_groups * self.num_blocks_per_group
output_blocks = OrderedDict()
output_blocks['relu'] = nn.ReLU()
output_blocks['conv'] = nn.Conv2d(8 * self.hidden_size, self.vocab_size, kernel_size=1, padding=0)
blocks = OrderedDict()
blocks['input'] = nn.Conv2d(self.input_channels, 1 * self.hidden_size, kernel_size=7, padding=3)
blocks['group_1'] = FlavaImageCodebookLayerGroup(self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 1 * self.hidden_size)
blocks['group_2'] = FlavaImageCodebookLayerGroup(self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 2 * self.hidden_size)
blocks['group_3'] = FlavaImageCodebookLayerGroup(self.num_blocks_per_group, num_layers, 2 * self.hidden_size, 4 * self.hidden_size)
blocks['group_4'] = FlavaImageCodebookLayerGroup(self.num_blocks_per_group, num_layers, 4 * self.hidden_size, 8 * self.hidden_size, use_pool=False)
blocks['output'] = nn.Sequential(output_blocks)
self.blocks = nn.Sequential(blocks)
self.post_init()
if self.config.freeze:
for param in self.parameters():
param.requires_grad = False
def get_codebook_indices(self, pixel_values: torch.Tensor) -> torch.Tensor:
f'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing\n `return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details.\n\n Examples:\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoImageProcessor, FlavaImageCodebook\n\n >>> model = FlavaImageCodebook.from_pretrained("{_CHECKPOINT_FOR_CODEBOOK_DOC}")\n >>> image_processor = AutoImageProcessor.from_pretrained("{_CHECKPOINT_FOR_CODEBOOK_DOC}")\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")\n >>> inputs = dict(pixel_values=inputs.codebook_pixel_values)\n\n >>> outputs = model.get_codebook_indices(**inputs)\n ```\n '
z_logits = self.blocks(pixel_values)
return torch.argmax(z_logits, axis=1)
def get_codebook_probs(self, pixel_values: torch.Tensor) -> torch.Tensor:
z_logits = self.blocks(pixel_values)
return nn.Softmax(dim=1)(z_logits)
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
f'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing\n `return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details.\n\n Examples:\n\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import AutoImageProcessor, FlavaImageCodebook\n\n >>> model = FlavaImageCodebook.from_pretrained("{_CHECKPOINT_FOR_CODEBOOK_DOC}")\n >>> image_processor = AutoImageProcessor.from_pretrained("{_CHECKPOINT_FOR_CODEBOOK_DOC}")\n\n >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")\n >>> inputs = dict(pixel_values=inputs.codebook_pixel_values)\n\n >>> outputs = model(**inputs)\n >>> print(outputs.shape)\n (1, 196)\n ```\n '
if len(pixel_values.shape) != 4:
raise ValueError(f'input shape {pixel_values.shape} is not 4d')
if pixel_values.shape[1] != self.input_channels:
raise ValueError(f'input has {pixel_values.shape[1]} channels but model built for {self.input_channels}')
return self.blocks(pixel_values)
|
@auto_docstring(custom_intro="\n The FLAVA's image codebook model inspired from DALL-E's original encoder. Outputs raw hidden states and can be used\n to generate image tokens for an image based on DALL-E's vocab. Used to generate labels for MIM. Use\n `get_codebook_indices` to get image tokens for an image.\n ")
class FlavaImageCodebook(FlavaPreTrainedModel):
def __init__(self, config: FlavaImageCodebookConfig, **kwargs: Any):
pass
def get_codebook_indices(self, pixel_values: torch.Tensor) -> torch.Tensor:
pass
def get_codebook_probs(self, pixel_values: torch.Tensor) -> torch.Tensor:
pass
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
pass
| 6
| 0
| 27
| 5
| 22
| 0
| 2
| 0
| 1
| 7
| 2
| 0
| 4
| 7
| 4
| 5
| 115
| 22
| 93
| 26
| 84
| 0
| 43
| 22
| 38
| 3
| 2
| 2
| 8
|
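A short sketch of how `FlavaImageCodebook.get_codebook_indices` and `get_codebook_probs` reduce the codebook logits; the random tensor stands in for `self.blocks(pixel_values)`, and the vocabulary and grid sizes are illustrative.
```python
import torch
from torch import nn

vocab_size = 8192                                  # illustrative visual vocabulary size
z_logits = torch.randn(1, vocab_size, 14, 14)      # stand-in for self.blocks(pixel_values)

codebook_indices = torch.argmax(z_logits, dim=1)   # (1, 14, 14) integer tokens, used as MIM labels
codebook_probs = nn.Softmax(dim=1)(z_logits)       # per-position distribution over the vocabulary
```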
2,547
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaImageCodebookBlock
|
from torch import nn
import torch
class FlavaImageCodebookBlock(nn.Module):
def __init__(self, in_size: int, out_size: int, num_layers: int, **kwargs):
super().__init__()
self.post_gain = 1 / num_layers ** 2
if in_size != out_size:
self.id_path = nn.Conv2d(in_size, out_size, kernel_size=1, padding=0)
else:
self.id_path = nn.Identity()
self.res_path = FlavaImageCodebookResPath(in_size, out_size)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.id_path(x) + self.post_gain * self.res_path(x)
|
class FlavaImageCodebookBlock(nn.Module):
def __init__(self, in_size: int, out_size: int, num_layers: int, **kwargs):
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 7
| 2
| 5
| 0
| 2
| 0
| 1
| 4
| 1
| 0
| 2
| 3
| 2
| 12
| 15
| 4
| 11
| 6
| 8
| 0
| 10
| 6
| 7
| 2
| 1
| 1
| 3
|
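The block above damps its residual path by `1 / num_layers ** 2`. A compressed editorial sketch of that scaled-residual idea with stand-in convolutions (the real residual path is `FlavaImageCodebookResPath`); channel counts are illustrative.
```python
import torch
from torch import nn

num_layers, in_ch, out_ch = 8, 64, 128
post_gain = 1 / num_layers ** 2                      # residual damping, as in the block above

id_path = nn.Conv2d(in_ch, out_ch, kernel_size=1)    # 1x1 projection because in_ch != out_ch
res_path = nn.Sequential(nn.ReLU(), nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1))

x = torch.randn(1, in_ch, 32, 32)
y = id_path(x) + post_gain * res_path(x)             # (1, 128, 32, 32)
```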
2,548
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaImageCodebookLayerGroup
|
from torch import nn
import torch
from collections import OrderedDict
class FlavaImageCodebookLayerGroup(nn.Module):
def __init__(self, num_blocks: int, num_layers: int, in_size: int, out_size: int, use_pool: bool=True):
super().__init__()
blocks = OrderedDict()
for i in range(num_blocks):
if i == 0:
blocks[f'block_{i + 1}'] = FlavaImageCodebookBlock(in_size, out_size, num_layers)
else:
blocks[f'block_{i + 1}'] = FlavaImageCodebookBlock(out_size, out_size, num_layers)
if use_pool:
blocks['pool'] = nn.MaxPool2d(kernel_size=2)
self.group = nn.Sequential(blocks)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.group(x)
|
class FlavaImageCodebookLayerGroup(nn.Module):
def __init__(self, num_blocks: int, num_layers: int, in_size: int, out_size: int, use_pool: bool=True):
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 8
| 1
| 7
| 0
| 3
| 0
| 1
| 7
| 1
| 0
| 2
| 1
| 2
| 12
| 17
| 3
| 14
| 6
| 11
| 0
| 13
| 6
| 10
| 4
| 1
| 2
| 5
|
2,549
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaImageCodebookResPath
|
from torch import nn
import torch
from collections import OrderedDict
class FlavaImageCodebookResPath(nn.Module):
def __init__(self, in_size: int, out_size: int, **kwargs):
super().__init__()
hid_size = out_size // 4
path = OrderedDict()
path['relu_1'] = nn.ReLU()
path['conv_1'] = nn.Conv2d(in_size, hid_size, kernel_size=3, padding=1)
path['relu_2'] = nn.ReLU()
path['conv_2'] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1)
path['relu_3'] = nn.ReLU()
path['conv_3'] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1)
path['relu_4'] = nn.ReLU()
path['conv_4'] = nn.Conv2d(hid_size, out_size, kernel_size=1, padding=0)
self.path = nn.Sequential(path)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.path(x)
|
class FlavaImageCodebookResPath(nn.Module):
def __init__(self, in_size: int, out_size: int, **kwargs):
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 9
| 1
| 8
| 0
| 1
| 0
| 1
| 4
| 0
| 0
| 2
| 1
| 2
| 12
| 19
| 3
| 16
| 6
| 13
| 0
| 16
| 6
| 13
| 1
| 1
| 0
| 2
|
2,550
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaImageEmbeddings
|
from torch import nn
from .configuration_flava import FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig
import torch
from typing import Any, Optional, Union
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
class FlavaImageEmbeddings(nn.Module):
"""
Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
"""
def __init__(self, config: FlavaImageConfig, use_mask_token: bool=False) -> None:
super().__init__()
use_mask_token = use_mask_token or config.mask_token
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
self.patch_embeddings = PatchEmbeddings(image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.hidden_size)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
if bool_masked_pos.dim() == 3:
bool_masked_pos = bool_masked_pos.view(bool_masked_pos.size(0), -1)
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
|
class FlavaImageEmbeddings(nn.Module):
'''
Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
'''
def __init__(self, config: FlavaImageConfig, use_mask_token: bool=False) -> None:
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> torch.Tensor:
pass
| 4
| 2
| 29
| 5
| 20
| 4
| 3
| 0.26
| 1
| 6
| 2
| 0
| 3
| 7
| 3
| 13
| 96
| 19
| 61
| 31
| 52
| 16
| 45
| 26
| 41
| 4
| 1
| 2
| 8
|
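An editorial sketch of the position-embedding interpolation performed by `FlavaImageEmbeddings.interpolate_pos_encoding` above: the patch part of the table is reshaped to its 2D grid, resized with bicubic interpolation, and flattened back in front of the CLS entry. The 14x14 grid and 320x320 target size are illustrative.
```python
import torch
from torch import nn

dim, patch_size = 768, 16
pos_embed = torch.randn(1, 1 + 14 * 14, dim)          # CLS slot + 14x14 grid (224x224 pretraining)
new_height, new_width = 320 // patch_size, 320 // patch_size

class_pos = pos_embed[:, :1]
patch_pos = pos_embed[:, 1:].reshape(1, 14, 14, dim).permute(0, 3, 1, 2)
patch_pos = nn.functional.interpolate(patch_pos, size=(new_height, new_width),
                                      mode="bicubic", align_corners=False)
patch_pos = patch_pos.permute(0, 2, 3, 1).view(1, -1, dim)
pos_embed_resized = torch.cat((class_pos, patch_pos), dim=1)   # (1, 1 + 20 * 20, dim)
```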
2,551
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaImageModel
|
from torch import nn
from .configuration_flava import FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from typing import Any, Optional, Union
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
@auto_docstring
class FlavaImageModel(FlavaPreTrainedModel):
config: FlavaImageConfig
base_model_prefix = 'flava.image_model'
main_input_name = 'pixel_values'
def __init__(self, config: FlavaImageConfig, add_pooling_layer: bool=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = FlavaImageEmbeddings(config)
self.encoder = FlavaEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pooler = FlavaPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.embeddings.patch_embeddings
def set_input_embeddings(self, value: nn.Module):
self.embeddings.patch_embeddings = value
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: Optional[bool]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding)
encoder_outputs = self.encoder(embedding_output, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring
class FlavaImageModel(FlavaPreTrainedModel):
def __init__(self, config: FlavaImageConfig, add_pooling_layer: bool=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self) -> nn.Module:
pass
def set_input_embeddings(self, value: nn.Module):
pass
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: Optional[bool]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
'''
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
'''
pass
| 8
| 3
| 15
| 2
| 11
| 2
| 3
| 0.15
| 1
| 11
| 5
| 0
| 5
| 5
| 5
| 6
| 93
| 15
| 68
| 30
| 44
| 10
| 34
| 19
| 28
| 7
| 2
| 1
| 13
|
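A hedged usage sketch for `FlavaImageModel`: a randomly initialized model built from the default `FlavaImageConfig` (assumed here to describe 224x224 inputs with 16x16 patches) run on random pixels, only to show the output shapes; it is not a substitute for loading pretrained weights.
```python
import torch
from transformers import FlavaImageConfig, FlavaImageModel

config = FlavaImageConfig()                 # defaults assumed: hidden_size=768, patch_size=16, image_size=224
model = FlavaImageModel(config).eval()      # random weights, illustration only

pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values=pixel_values)

print(outputs.last_hidden_state.shape)      # (1, 197, 768): CLS token + 14*14 patches
print(outputs.pooler_output.shape)          # (1, 768)
```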
2,552
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaIntermediate
|
import torch
from torch import nn
from ...activations import ACT2FN
class FlavaIntermediate(nn.Module):
def __init__(self, config: FlavaPossibleConfigs) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class FlavaIntermediate(nn.Module):
def __init__(self, config: FlavaPossibleConfigs) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 1
| 6
| 0
| 2
| 0.08
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 15
| 2
| 12
| 5
| 9
| 1
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
2,553
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaLayer
|
from torch import nn
from typing import Any, Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
import torch
class FlavaLayer(GradientCheckpointingLayer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config: FlavaPossibleConfigs) -> None:
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = FlavaAttention(config)
self.intermediate = FlavaIntermediate(config)
self.output = FlavaOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
self_attention_outputs = self.attention(self.layernorm_before(hidden_states), attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
hidden_states = attention_output + hidden_states
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output, hidden_states)
outputs = (layer_output,) + outputs
return outputs
|
class FlavaLayer(GradientCheckpointingLayer):
'''This corresponds to the Block class in the timm implementation.'''
def __init__(self, config: FlavaPossibleConfigs) -> None:
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
pass
| 3
| 1
| 20
| 3
| 15
| 3
| 1
| 0.23
| 1
| 6
| 3
| 0
| 2
| 7
| 2
| 12
| 44
| 8
| 31
| 20
| 22
| 7
| 20
| 14
| 17
| 1
| 1
| 0
| 2
|
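The record above is a pre-norm ViT block: LayerNorm before each sublayer, residual added after. A compressed sketch of the same data flow with stand-in sublayers (`nn.MultiheadAttention` and a plain MLP replace `FlavaAttention` / `FlavaIntermediate` / `FlavaOutput`, the last of which also applies dropout before its residual).
```python
import torch
from torch import nn

hidden = torch.randn(2, 5, 768)
norm1, norm2 = nn.LayerNorm(768), nn.LayerNorm(768)
attn = nn.MultiheadAttention(768, 12, batch_first=True)                      # stand-in for FlavaAttention
mlp = nn.Sequential(nn.Linear(768, 3072), nn.GELU(), nn.Linear(3072, 768))   # Intermediate + Output

normed = norm1(hidden)
attn_out, _ = attn(normed, normed, normed)
hidden = attn_out + hidden                     # first residual, as in FlavaLayer.forward
hidden = mlp(norm2(hidden)) + hidden           # second residual (added inside FlavaOutput in the real model)
```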
2,554
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaLosses
|
from dataclasses import dataclass
from typing import Any, Optional, Union
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
import torch
@dataclass
@auto_docstring(custom_intro='\n Class representing pretraining losses from FLAVA model\n ')
class FlavaLosses(ModelOutput):
"""
mim (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels` and `pixel_values` are present, `input_ids_masked` is absent and `mim_weight` > 0.):
Masked Image Modeling loss as used in BeIT calculated only for unimodal image data.
mlm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels` and `input_ids_masked` are present, `pixel_values` is absent and `mlm_weight` > 0.):
Masked Language Modeling loss as used in BERT calculated only for unimodal text data.
itm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `itm_labels`, `input_ids_masked`, `pixel_values` are present and `itm_weight` > 0.):
Image Text Matching (ITM) loss calculated for paired image-text data. Note that ITM loss is calculated on
masked pairs in FLAVA.
global_contrastive (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `input_ids` and `pixel_values` are present and `global_contrastive_weight` > 0.):
Contrastive loss for image-text similarity similar to CLIP but calculated globally for paired image-text
data. This is calculated on unmasked images and texts.
mmm_image (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_image_weight` > 0.):
Masked Multimodal Modeling loss's image component calculated on paired image-text data.
mmm_text (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_text_weight` > 0.):
Masked Multimodal Modeling loss's text component calculated on paired image-text data.
"""
mim: Optional[torch.FloatTensor] = None
mlm: Optional[torch.FloatTensor] = None
itm: Optional[torch.FloatTensor] = None
global_contrastive: Optional[torch.FloatTensor] = None
mmm_image: Optional[torch.FloatTensor] = None
mmm_text: Optional[torch.FloatTensor] = None
def all_none(self) -> bool:
all_none = True
for v in self.values():
if v is not None:
all_none = False
break
return all_none
|
@dataclass
@auto_docstring(custom_intro='\n Class representing pretraining losses from FLAVA model\n ')
class FlavaLosses(ModelOutput):
'''
mim (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels` and `pixel_values` are present, `input_ids_masked` is absent and `mim_weight` > 0.):
Masked Image Modeling loss as used in BeIT calculated only for unimodal image data.
mlm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels` and `input_ids_masked` are present, `pixel_values` is absent and `mlm_weight` > 0.):
Masked Language Modeling loss as used in BERT calculated only for unimodal text data.
itm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `itm_labels`, `input_ids_masked`, `pixel_values` are present and `itm_weight` > 0.):
Image Text Matching (ITM) loss calculated for paired image-text data. Note that ITM loss is calculated on
masked pairs in FLAVA.
global_contrastive (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `input_ids` and `pixel_values` are present and `global_contrastive_weight` > 0.):
Contrastive loss for image-text similarity similar to CLIP but calculated globally for paired image-text
data. This is calculated on unmasked images and texts.
mmm_image (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_image_weight` > 0.):
Masked Multimodal Modeling loss's image component calculated on paired image-text data.
mmm_text (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_text_weight` > 0.):
Masked Multimodal Modeling loss's text component calculated on paired image-text data.
'''
def all_none(self) -> bool:
pass
| 4
| 1
| 7
| 0
| 7
| 0
| 3
| 1.21
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 34
| 3
| 14
| 10
| 12
| 17
| 14
| 10
| 12
| 3
| 1
| 2
| 3
|
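The `all_none` helper in the FlavaLosses record above simply reports whether any pretraining loss was populated. A minimal sketch of that behaviour, assuming `transformers` is installed so the class can be imported from the module path listed in the record:

```python
import torch
from transformers.models.flava.modeling_flava import FlavaLosses

losses = FlavaLosses()                       # every loss field defaults to None
print(losses.all_none())                     # True: no loss was computed

losses = FlavaLosses(mlm=torch.tensor(0.5))  # e.g. only the MLM loss is present
print(losses.all_none())                     # False
```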
2,555
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaMaskedPredictionHead
|
from torch import nn
import torch
class FlavaMaskedPredictionHead(nn.Module):
def __init__(self, config, weight=None):
super().__init__()
self.config = config
self.transform = FlavaPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
if weight is not None:
self.decoder.weight = weight
self.decoder.bias = self.bias
def _tie_weights(self):
self.decoder.bias = self.bias
def forward(self, x):
x = self.transform(x)
x = self.decoder(x)
return x
|
class FlavaMaskedPredictionHead(nn.Module):
def __init__(self, config, weight=None):
pass
def _tie_weights(self):
pass
def forward(self, x):
pass
| 4
| 0
| 6
| 0
| 5
| 0
| 1
| 0.06
| 1
| 2
| 1
| 0
| 3
| 4
| 3
| 13
| 20
| 3
| 16
| 8
| 12
| 1
| 16
| 8
| 12
| 2
| 1
| 1
| 4
|
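A small sketch of the prediction head above mapping hidden states to vocabulary logits. The tiny `FlavaTextConfig` values are illustrative only; any config exposing `hidden_size`, `vocab_size`, `hidden_act` and `layer_norm_eps` would work the same way:

```python
import torch
from transformers import FlavaTextConfig
from transformers.models.flava.modeling_flava import FlavaMaskedPredictionHead

config = FlavaTextConfig(hidden_size=32, vocab_size=100)
head = FlavaMaskedPredictionHead(config)

hidden = torch.randn(2, 7, config.hidden_size)   # (batch, seq_len, hidden)
logits = head(hidden)                            # transform + decoder projection
print(logits.shape)                              # torch.Size([2, 7, 100])
```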
2,556
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaModel
|
from torch import nn
from .configuration_flava import FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from typing import Any, Optional, Union
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
@auto_docstring
class FlavaModel(FlavaPreTrainedModel):
config: FlavaConfig
def __init__(self, config: FlavaConfig):
super().__init__(config)
if not isinstance(config.text_config, FlavaTextConfig):
raise TypeError(f'config.text_config is expected to be of type FlavaTextConfig but is of type {type(config.text_config)}.')
if not isinstance(config.image_config, FlavaImageConfig):
raise TypeError(f'config.image_config is expected to be of type FlavaImageConfig but is of type {type(config.image_config)}.')
if not isinstance(config.multimodal_config, FlavaMultimodalConfig):
raise TypeError('config.multimodal_config is expected to be of type FlavaMultimodalConfig but ' + f'is of type {type(config.multimodal_config)}.')
text_config = config.text_config
image_config = config.image_config
multimodal_config = config.multimodal_config
self.projection_dim = config.projection_dim
self.text_hidden_size = text_config.hidden_size
self.image_hidden_size = image_config.hidden_size
self.mm_hidden_size = multimodal_config.hidden_size
self.text_model = FlavaTextModel(text_config)
self.image_model = FlavaImageModel(image_config)
self.multimodal_model = FlavaMultimodalModel(multimodal_config)
self.image_projection = nn.Linear(self.image_hidden_size, self.projection_dim)
self.text_projection = nn.Linear(self.text_hidden_size, self.projection_dim)
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
self.image_to_mm_projection = nn.Linear(self.image_hidden_size, self.mm_hidden_size)
self.text_to_mm_projection = nn.Linear(self.text_hidden_size, self.mm_hidden_size)
self.post_init()
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, text_seq_length)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, text_seq_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
applying the projection layer to the pooled output of [`FlavaTextModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, FlavaModel
>>> model = FlavaModel.from_pretrained("{0}")
>>> processor = AutoProcessor.from_pretrained("{0}")
>>> inputs = processor(
... text=["a photo of a cat", "a photo of a dog"], max_length=77, padding="max_length", return_tensors="pt"
... )
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```
"""
text_outputs: BaseModelOutputWithPooling = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids)
pooled_output = text_outputs.last_hidden_state
text_features = self.text_projection(pooled_output)
return text_features
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: Optional[bool]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None) -> torch.FloatTensor:
"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
applying the projection layer to the pooled output of [`FlavaImageModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, FlavaModel
>>> from transformers.image_utils import load_image
>>> model = FlavaModel.from_pretrained("{0}")
>>> processor = AutoProcessor.from_pretrained("{0}")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_features = model.get_image_features(**inputs)
```
"""
image_outputs: BaseModelOutputWithPooling = self.image_model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos, attention_mask=attention_mask, head_mask=head_mask, interpolate_pos_encoding=interpolate_pos_encoding)
pooled_output = image_outputs.last_hidden_state
image_features = self.image_projection(pooled_output)
return image_features
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, skip_multimodal_encoder: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: bool=True, return_dict: Optional[bool]=None) -> Union[tuple, FlavaOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, image_num_patches + text_seq_len)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, image_num_patches + text_seq_len)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
image_attention_mask (`torch.Tensor` of shape `(batch_size, image_num_patches)`, *optional*):
Mask to avoid performing attention on padding pixel values for image inputs. Mask values selected in `[0, 1]`:
- 1 for pixel values that are real (i.e., **not masked**),
- 0 for pixel values that are padding (i.e., **masked**).
skip_multimodal_encoder (*bool*, *optional*):
Skip any calculations for multimodal encoder. Useful if multimodal encoding is not going to be used.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, FlavaModel
>>> model = FlavaModel.from_pretrained("facebook/flava-full")
>>> processor = AutoProcessor.from_pretrained("facebook/flava-full")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
>>> outputs = model(**inputs)
>>> image_embeddings = outputs.image_embeddings
>>> text_embeddings = outputs.text_embeddings
>>> multimodal_embeddings = outputs.multimodal_embeddings
>>> outputs.image_embeddings.shape
torch.Size([1, 197, 768])
>>> text_embeddings.shape
torch.Size([1, 7, 768])
>>> multimodal_embeddings.shape
torch.Size([1, 205, 768])
```
"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
if not output_hidden_states:
raise ValueError('FLAVA model requires hidden states to work. Please set `output_hidden_states=True`')
image_embeddings = None
image_states = None
image_mm_projection = None
image_output = None
if pixel_values is not None:
image_output = self.image_model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos, attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
image_embeddings, image_states = (image_output[0], image_output[2])
image_mm_projection = self.image_to_mm_projection(image_states[-1])
text_embeddings = None
text_states = None
text_mm_projection = None
text_output = None
if input_ids is not None:
text_output = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
text_embeddings, text_states = (text_output[0], text_output[2])
text_mm_projection = self.text_to_mm_projection(text_states[-1])
multimodal_embeddings = None
multimodal_output = None
if image_mm_projection is not None and text_mm_projection is not None and (not skip_multimodal_encoder):
if attention_mask is not None:
batch_size, seq_len, _ = image_mm_projection.shape
if self.multimodal_model.use_cls_token:
seq_len += 1
attention_mask_image = torch.ones(batch_size, seq_len, device=image_mm_projection.device)
attention_multimodal = torch.cat([attention_mask_image, attention_mask], dim=1)
else:
attention_multimodal = None
multimodal_input = torch.cat([image_mm_projection, text_mm_projection], dim=1)
multimodal_output = self.multimodal_model(multimodal_input, attention_mask=attention_multimodal, return_dict=return_dict)
multimodal_embeddings = multimodal_output[0]
if not return_dict:
return (image_embeddings, image_output, text_embeddings, text_output, multimodal_embeddings, multimodal_output)
return FlavaModelOutput(image_embeddings=image_embeddings, image_output=image_output, text_embeddings=text_embeddings, text_output=text_output, multimodal_embeddings=multimodal_embeddings, multimodal_output=multimodal_output)
|
@auto_docstring
class FlavaModel(FlavaPreTrainedModel):
def __init__(self, config: FlavaConfig):
pass
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None) -> torch.FloatTensor:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, text_seq_length)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, text_seq_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
applying the projection layer to the pooled output of [`FlavaTextModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, FlavaModel
>>> model = FlavaModel.from_pretrained("{0}")
>>> processor = AutoProcessor.from_pretrained("{0}")
>>> inputs = processor(
... text=["a photo of a cat", "a photo of a dog"], max_length=77, padding="max_length", return_tensors="pt"
... )
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```
'''
pass
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: Optional[bool]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None) -> torch.FloatTensor:
'''
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
applying the projection layer to the pooled output of [`FlavaImageModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, FlavaModel
>>> from transformers.image_utils import load_image
>>> model = FlavaModel.from_pretrained("{0}")
>>> processor = AutoProcessor.from_pretrained("{0}")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_features = model.get_image_features(**inputs)
```
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, image_attention_mask: Optional[torch.Tensor]=None, skip_multimodal_encoder: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: bool=True, return_dict: Optional[bool]=None) -> Union[tuple, FlavaOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, image_num_patches + text_seq_len)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, image_num_patches + text_seq_len)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
image_attention_mask (`torch.Tensor` of shape `(batch_size, image_num_patches)`, *optional*):
Mask to avoid performing attention on padding pixel values for image inputs. Mask values selected in `[0, 1]`:
- 1 for pixel values that are real (i.e., **not masked**),
- 0 for pixel values that are padding (i.e., **masked**).
skip_multimodal_encoder (*bool*, *optional*):
Skip any calculations for multimodal encoder. Useful if multimodal encoding is not going to be used.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, FlavaModel
>>> model = FlavaModel.from_pretrained("facebook/flava-full")
>>> processor = AutoProcessor.from_pretrained("facebook/flava-full")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
>>> outputs = model(**inputs)
>>> image_embeddings = outputs.image_embeddings
>>> text_embeddings = outputs.text_embeddings
>>> multimodal_embeddings = outputs.multimodal_embeddings
>>> outputs.image_embeddings.shape
torch.Size([1, 197, 768])
>>> text_embeddings.shape
torch.Size([1, 7, 768])
>>> multimodal_embeddings.shape
torch.Size([1, 205, 768])
```
'''
pass
| 11
| 3
| 64
| 10
| 48
| 7
| 4
| 0.15
| 1
| 15
| 9
| 0
| 4
| 12
| 4
| 5
| 267
| 42
| 198
| 78
| 155
| 29
| 74
| 41
| 69
| 9
| 2
| 3
| 15
|
2,557
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaModelOutput
|
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
from dataclasses import dataclass
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from typing import Any, Optional, Union
@dataclass
@auto_docstring(custom_intro="\n Output from FlavaModel containing embeddings and outputs from individual encoders.\n\n Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a\n transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and\n `text_projection` layers on `image_embeddings` and `text_embeddings` respectively.\n ")
class FlavaModelOutput(ModelOutput):
"""
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
The image embeddings which are basically the pooled output of [`FlavaImageModel`].
image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
The output of the [`FlavaImageModel`].
text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
The text embeddings which are basically the pooled output of [`FlavaTextModel`].
text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
The output of the [`FlavaTextModel`].
multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`].
multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
The output of the [`FlavaMultimodalModel`].
"""
image_embeddings: Optional[torch.FloatTensor] = None
image_output: Optional[BaseModelOutputWithPooling] = None
text_embeddings: Optional[torch.FloatTensor] = None
text_output: Optional[BaseModelOutputWithPooling] = None
multimodal_embeddings: Optional[torch.FloatTensor] = None
multimodal_output: Optional[BaseModelOutputWithPooling] = None
def to_tuple(self) -> tuple[Any]:
return tuple((self[k] if k not in ['text_output', 'image_output', 'multimodal_output'] else getattr(self, k).to_tuple() for k in self.keys()))
|
@dataclass
@auto_docstring(custom_intro="\n Output from FlavaModel containing embeddings and outputs from individual encoders.\n\n Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a\n transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and\n `text_projection` layers on `image_embeddings` and `text_embeddings` respectively.\n ")
class FlavaModelOutput(ModelOutput):
'''
image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
The image embeddings which are basically the pooled output of [`FlavaImageModel`].
image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
The output of the [`FlavaImageModel`].
text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
The text embeddings which are basically the pooled output of [`FlavaTextModel`].
text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
The output of the [`FlavaTextModel`].
multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`].
multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
The output of the [`FlavaMultimodalModel`].
'''
def to_tuple(self) -> tuple[Any]:
pass
| 4
| 1
| 5
| 0
| 5
| 0
| 2
| 1.58
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 1
| 35
| 4
| 12
| 8
| 10
| 19
| 9
| 8
| 7
| 2
| 1
| 0
| 2
|
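The intro of the FlavaModelOutput record above notes that `image_embeddings`/`text_embeddings` behave like transformer pooled outputs and that retrieval or contrastive use should go through the model's projection layers. A hedged sketch of that recipe using the record's own `get_text_features`/`get_image_features` and taking the first ([CLS]) position; it assumes the `facebook/flava-full` checkpoint can be downloaded:

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, FlavaModel

model = FlavaModel.from_pretrained("facebook/flava-full")
processor = AutoProcessor.from_pretrained("facebook/flava-full")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)

with torch.inference_mode():
    # projected [CLS] embeddings from each unimodal encoder
    text_feat = model.get_text_features(
        input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"]
    )[:, 0]
    image_feat = model.get_image_features(pixel_values=inputs["pixel_values"])[:, 0]

similarity = torch.nn.functional.cosine_similarity(text_feat, image_feat)
print(similarity)
```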
2,558
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaMultimodalModel
|
from torch import nn
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
from typing import Any, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
import torch
from .configuration_flava import FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig
@auto_docstring
class FlavaMultimodalModel(FlavaPreTrainedModel):
config: FlavaMultimodalConfig
base_model_prefix = 'flava.multimodal_model'
main_input_name = 'hidden_states'
def __init__(self, config: FlavaMultimodalConfig, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.use_cls_token = self.config.use_cls_token
if self.use_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.encoder = FlavaEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pooler = FlavaPooler(config) if add_pooling_layer else None
self.post_init()
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
"""
hidden_states (`torch.FloatTensor` of shape `(batch_size, image_num_patches + text_seq_len, hidden_size)`):
The concatenated hidden states of unimodal encoders.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, seq_length, _ = hidden_states.size()
if self.use_cls_token:
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
hidden_states = torch.cat((cls_tokens, hidden_states), dim=1)
seq_length += 1
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=hidden_states.device)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, (batch_size, seq_length), hidden_states.device)
encoder_outputs = self.encoder(hidden_states, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring
class FlavaMultimodalModel(FlavaPreTrainedModel):
def __init__(self, config: FlavaMultimodalConfig, add_pooling_layer=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
'''
hidden_states (`torch.FloatTensor` of shape `(batch_size, image_num_patches + text_seq_len, hidden_size)`):
The concatenated hidden states of unimodal encoders.
'''
pass
| 6
| 3
| 25
| 3
| 19
| 3
| 4
| 0.14
| 1
| 9
| 4
| 0
| 3
| 6
| 3
| 4
| 92
| 13
| 69
| 31
| 49
| 10
| 37
| 20
| 33
| 8
| 2
| 1
| 13
|
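A minimal sketch of the multimodal encoder above consuming already-concatenated unimodal hidden states (its `main_input_name` is `hidden_states`). The tiny config values are illustrative, not the released FLAVA sizes:

```python
import torch
from transformers import FlavaMultimodalConfig
from transformers.models.flava.modeling_flava import FlavaMultimodalModel

config = FlavaMultimodalConfig(hidden_size=32, num_hidden_layers=1,
                               num_attention_heads=2, intermediate_size=64)
model = FlavaMultimodalModel(config).eval()

# Stand-in for torch.cat([image_mm_projection, text_mm_projection], dim=1)
hidden_states = torch.randn(1, 12, config.hidden_size)
with torch.inference_mode():
    out = model(hidden_states=hidden_states)
print(out.last_hidden_state.shape)  # torch.Size([1, 13, 32]); a CLS token is prepended
```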
2,559
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaOutput
|
import torch
from torch import nn
class FlavaOutput(nn.Module):
def __init__(self, config: FlavaPossibleConfigs) -> None:
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
|
class FlavaOutput(nn.Module):
def __init__(self, config: FlavaPossibleConfigs) -> None:
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 1
| 5
| 0
| 1
| 0.1
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 14
| 3
| 10
| 5
| 7
| 1
| 10
| 5
| 7
| 1
| 1
| 0
| 2
|
2,560
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaPooler
|
from torch import nn
import torch
class FlavaPooler(nn.Module):
def __init__(self, config: FlavaPossibleConfigs):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class FlavaPooler(nn.Module):
def __init__(self, config: FlavaPossibleConfigs):
pass
def forward(self, hidden_states: torch.Tensor):
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 10
| 7
| 7
| 2
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
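The pooler above applies a dense layer and tanh to the first (CLS) token only. A quick sketch with a hypothetical stand-in config object, since only `hidden_size` is read:

```python
import torch
from transformers.models.flava.modeling_flava import FlavaPooler

class _Cfg:                  # hypothetical minimal config; only hidden_size is used
    hidden_size = 16

pooler = FlavaPooler(_Cfg())
hidden_states = torch.randn(2, 5, 16)   # (batch, seq_len, hidden)
pooled = pooler(hidden_states)          # dense + tanh over hidden_states[:, 0]
print(pooled.shape)                     # torch.Size([2, 16])
```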
2,561
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaPreTrainedModel
|
from torch import nn
from .configuration_flava import FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig
from ...modeling_utils import PreTrainedModel
from typing import Any, Optional, Union
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
@auto_docstring
class FlavaPreTrainedModel(PreTrainedModel):
config: FlavaConfig
base_model_prefix = 'flava'
supports_gradient_checkpointing = True
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, FlavaMaskedPredictionHead):
module.bias.data.zero_()
elif isinstance(module, FlavaImageEmbeddings):
module.cls_token.data.zero_()
module.position_embeddings.data.zero_()
if module.mask_token is not None:
module.mask_token.data.zero_()
elif isinstance(module, FlavaMultimodalModel):
if module.use_cls_token:
module.cls_token.data.zero_()
elif isinstance(module, FlavaModel):
module.logit_scale.data.fill_(self.config.logit_scale_init_value)
|
@auto_docstring
class FlavaPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
'''Initialize the weights'''
pass
| 3
| 1
| 15
| 0
| 12
| 3
| 6
| 0.44
| 1
| 0
| 0
| 6
| 1
| 0
| 1
| 1
| 25
| 2
| 16
| 5
| 14
| 7
| 14
| 5
| 12
| 6
| 1
| 2
| 6
|
2,562
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaPredictionHeadTransform
|
from torch import nn
from ...activations import ACT2FN
class FlavaPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
|
class FlavaPredictionHeadTransform(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 2
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 15
| 1
| 14
| 6
| 11
| 0
| 13
| 6
| 10
| 2
| 1
| 1
| 3
|
2,563
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaSelfAttention
|
import torch
from torch import nn
from typing import Any, Optional, Union
import math
class FlavaSelfAttention(nn.Module):
def __init__(self, config: FlavaPossibleConfigs) -> None:
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = self.key(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = self.value(hidden_states).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
|
class FlavaSelfAttention(nn.Module):
def __init__(self, config: FlavaPossibleConfigs) -> None:
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
pass
| 3
| 0
| 21
| 4
| 14
| 2
| 2
| 0.14
| 1
| 5
| 0
| 0
| 3
| 7
| 3
| 13
| 65
| 15
| 44
| 27
| 34
| 6
| 35
| 21
| 31
| 4
| 1
| 1
| 7
|
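A shape-level sketch of the attention module above: queries, keys and values are split into `num_attention_heads` heads of size `hidden_size // num_attention_heads`, and the per-head softmax weights can be returned with `output_attentions=True`. The small config is illustrative:

```python
import torch
from transformers import FlavaTextConfig
from transformers.models.flava.modeling_flava import FlavaSelfAttention

config = FlavaTextConfig(hidden_size=32, num_attention_heads=4)
attn = FlavaSelfAttention(config).eval()

hidden_states = torch.randn(2, 6, 32)                        # (batch, seq_len, hidden)
context, probs = attn(hidden_states, output_attentions=True)
print(context.shape)  # torch.Size([2, 6, 32])
print(probs.shape)    # torch.Size([2, 4, 6, 6]) -- (batch, heads, seq, seq)
```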
2,564
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaSelfOutput
|
from torch import nn
import torch
class FlavaSelfOutput(nn.Module):
"""
The residual connection is defined in FlavaLayer (same as ViTLayer) instead of here (as is the case with other
models), due to the layernorm applied before each block.
"""
def __init__(self, config: FlavaPossibleConfigs) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class FlavaSelfOutput(nn.Module):
'''
The residual connection is defined in FlavaLayer (same as ViTLayer) instead of here (as is the case with other
models), due to the layernorm applied before each block.
'''
def __init__(self, config: FlavaPossibleConfigs) -> None:
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 5
| 1
| 4
| 0
| 1
| 0.44
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 16
| 3
| 9
| 5
| 6
| 4
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
2,565
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaTextEmbeddings
|
from torch import nn
from typing import Any, Optional, Union
import torch
class FlavaTextEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None):
input_shape = input_ids.size()
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
if hasattr(self, 'token_type_ids'):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
|
class FlavaTextEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None):
pass
| 3
| 1
| 26
| 3
| 20
| 3
| 3
| 0.17
| 1
| 2
| 0
| 0
| 2
| 6
| 2
| 12
| 55
| 7
| 41
| 22
| 33
| 7
| 31
| 17
| 28
| 5
| 1
| 2
| 6
|
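A minimal sketch of the embedding module above: word, position and token-type embeddings are summed, layer-normalised and dropped out, with position and token-type ids falling back to the registered buffers when omitted. Tiny config values are illustrative:

```python
import torch
from transformers import FlavaTextConfig
from transformers.models.flava.modeling_flava import FlavaTextEmbeddings

config = FlavaTextConfig(vocab_size=100, hidden_size=16, max_position_embeddings=32)
embeddings = FlavaTextEmbeddings(config).eval()

input_ids = torch.tensor([[5, 8, 13, 21]])   # (batch, seq_len)
out = embeddings(input_ids=input_ids)        # word + position + token-type, then LayerNorm
print(out.shape)                             # torch.Size([1, 4, 16])
```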
2,566
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.FlavaTextModel
|
from torch import nn
from .configuration_flava import FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from typing import Any, Optional, Union
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, logging, torch_int
@auto_docstring
class FlavaTextModel(FlavaPreTrainedModel):
config: FlavaTextConfig
base_model_prefix = 'flava.text_model'
def __init__(self, config: FlavaTextConfig, add_pooling_layer: bool=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = FlavaTextEmbeddings(config)
self.encoder = FlavaEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pooler = FlavaPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self) -> PatchEmbeddings:
return self.embeddings.word_embeddings
def set_input_embeddings(self, value: nn.Module):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, text_seq_length)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, text_seq_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is None:
raise ValueError('You have to specify input_ids')
input_shape = input_ids.size()
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=input_ids.device)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, input_ids.device)
embedding_output = self.embeddings(input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
|
@auto_docstring
class FlavaTextModel(FlavaPreTrainedModel):
def __init__(self, config: FlavaTextConfig, add_pooling_layer: bool=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self) -> PatchEmbeddings:
pass
def set_input_embeddings(self, value: nn.Module):
pass
def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, text_seq_length)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, text_seq_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
'''
pass
| 8
| 3
| 17
| 2
| 13
| 2
| 3
| 0.14
| 1
| 12
| 6
| 0
| 5
| 5
| 5
| 6
| 99
| 16
| 73
| 31
| 51
| 10
| 37
| 20
| 31
| 8
| 2
| 1
| 14
|
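Putting the text pieces together, a sketch of running the text encoder above on raw token ids with a deliberately tiny, illustrative configuration (not the released checkpoint):

```python
import torch
from transformers import FlavaTextConfig
from transformers.models.flava.modeling_flava import FlavaTextModel

config = FlavaTextConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                         num_attention_heads=4, intermediate_size=64,
                         max_position_embeddings=32)
model = FlavaTextModel(config).eval()

input_ids = torch.tensor([[5, 8, 13, 21]])
with torch.inference_mode():
    out = model(input_ids=input_ids)
print(out.last_hidden_state.shape)  # torch.Size([1, 4, 32])
print(out.pooler_output.shape)      # torch.Size([1, 32])
```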
2,567
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/modeling_flava.py
|
transformers.models.flava.modeling_flava.PatchEmbeddings
|
from typing import Any, Optional, Union
import collections
from torch import nn
import torch
class PatchEmbeddings(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(self, image_size: int=224, patch_size: Union[int, tuple[int, int]]=16, num_channels: int=3, embed_dim: int=768):
super().__init__()
if not isinstance(image_size, collections.abc.Iterable):
image_size = (image_size, image_size)
if not isinstance(patch_size, collections.abc.Iterable):
patch_size = (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
if not interpolate_pos_encoding:
if height != self.image_size[0] or width != self.image_size[1]:
raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).")
x = self.projection(pixel_values).flatten(2).transpose(1, 2)
return x
|
class PatchEmbeddings(nn.Module):
'''
Image to Patch Embedding.
'''
def __init__(self, image_size: int=224, patch_size: Union[int, tuple[int, int]]=16, num_channels: int=3, embed_dim: int=768):
pass
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
pass
| 3
| 1
| 14
| 1
| 14
| 0
| 3
| 0.11
| 1
| 5
| 0
| 0
| 2
| 4
| 2
| 12
| 34
| 3
| 28
| 16
| 19
| 3
| 19
| 10
| 16
| 3
| 1
| 2
| 6
|
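A quick sketch of the patch embedding above: a 224x224 image with 16x16 patches gives (224 // 16) ** 2 = 196 patches, each projected to `embed_dim` channels by the strided convolution:

```python
import torch
from transformers.models.flava.modeling_flava import PatchEmbeddings

patcher = PatchEmbeddings(image_size=224, patch_size=16, num_channels=3, embed_dim=64)
print(patcher.num_patches)                  # 196

pixel_values = torch.randn(1, 3, 224, 224)  # (batch, channels, height, width)
patches = patcher(pixel_values)             # conv -> flatten -> transpose
print(patches.shape)                        # torch.Size([1, 196, 64])
```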
2,568
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/flava/processing_flava.py
|
transformers.models.flava.processing_flava.FlavaProcessor
|
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin
import warnings
class FlavaProcessor(ProcessorMixin):
"""
Constructs a FLAVA processor which wraps a FLAVA image processor and a FLAVA tokenizer into a single processor.
[`FlavaProcessor`] offers all the functionalities of [`FlavaImageProcessor`] and [`BertTokenizerFast`]. See the
[`~FlavaProcessor.__call__`] and [`~FlavaProcessor.decode`] for more information.
Args:
image_processor ([`FlavaImageProcessor`], *optional*): The image processor is a required input.
tokenizer ([`BertTokenizerFast`], *optional*): The tokenizer is a required input.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'FlavaImageProcessor'
tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
valid_processor_kwargs = FlavaProcessorKwargs
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if 'feature_extractor' in kwargs:
warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
feature_extractor = kwargs.pop('feature_extractor')
image_processor = image_processor if image_processor is not None else feature_extractor
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
@property
def feature_extractor_class(self):
warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
return self.image_processor_class
@property
def feature_extractor(self):
warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)
return self.image_processor
|
class FlavaProcessor(ProcessorMixin):
'''
Constructs a FLAVA processor which wraps a FLAVA image processor and a FLAVA tokenizer into a single processor.
[`FlavaProcessor`] offers all the functionalities of [`FlavaImageProcessor`] and [`BertTokenizerFast`]. See the
[`~FlavaProcessor.__call__`] and [`~FlavaProcessor.decode`] for more information.
Args:
image_processor ([`FlavaImageProcessor`], *optional*): The image processor is a required input.
tokenizer ([`BertTokenizerFast`], *optional*): The tokenizer is a required input.
'''
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
pass
@property
def feature_extractor_class(self):
pass
@property
def feature_extractor(self):
pass
| 6
| 1
| 16
| 1
| 13
| 2
| 2
| 0.21
| 1
| 10
| 2
| 0
| 7
| 1
| 7
| 24
| 138
| 16
| 101
| 41
| 69
| 21
| 42
| 17
| 34
| 6
| 2
| 1
| 16
|
2,569
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/configuration_fnet.py
|
transformers.models.fnet.configuration_fnet.FNetConfig
|
from ...configuration_utils import PretrainedConfig
class FNetConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`FNetModel`]. It is used to instantiate an FNet
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the FNet
[google/fnet-base](https://huggingface.co/google/fnet-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the FNet model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`FNetModel`] or [`TFFNetModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 4):
The vocabulary size of the `token_type_ids` passed when calling [`FNetModel`] or [`TFFNetModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
use_tpu_fourier_optimizations (`bool`, *optional*, defaults to `False`):
Determines whether to use TPU optimized FFTs. If `True`, the model will favor axis-wise FFTs transforms.
Set to `False` for GPU/CPU hardware, in which case n-dimensional FFTs are used.
tpu_short_seq_length (`int`, *optional*, defaults to 512):
The sequence length that is expected by the model when using TPUs. This will be used to initialize the DFT
matrix only when *use_tpu_fourier_optimizations* is set to `True` and the input sequence is shorter than or
equal to 4096 tokens.
Example:
```python
>>> from transformers import FNetConfig, FNetModel
>>> # Initializing a FNet fnet-base style configuration
>>> configuration = FNetConfig()
>>> # Initializing a model (with random weights) from the fnet-base style configuration
>>> model = FNetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'fnet'
def __init__(self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
self.tpu_short_seq_length = tpu_short_seq_length
|
class FNetConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`FNetModel`]. It is used to instantiate an FNet
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the FNet
[google/fnet-base](https://huggingface.co/google/fnet-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the FNet model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`FNetModel`] or [`TFFNetModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 4):
The vocabulary size of the `token_type_ids` passed when calling [`FNetModel`] or [`TFFNetModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
use_tpu_fourier_optimizations (`bool`, *optional*, defaults to `False`):
Determines whether to use TPU optimized FFTs. If `True`, the model will favor axis-wise FFTs transforms.
Set to `False` for GPU/CPU hardware, in which case n-dimensional FFTs are used.
tpu_short_seq_length (`int`, *optional*, defaults to 512):
The sequence length that is expected by the model when using TPUs. This will be used to initialize the DFT
matrix only when *use_tpu_fourier_optimizations* is set to `True` and the input sequence is shorter than or
equal to 4096 tokens.
Example:
```python
>>> from transformers import FNetConfig, FNetModel
>>> # Initializing a FNet fnet-base style configuration
>>> configuration = FNetConfig()
>>> # Initializing a model (with random weights) from the fnet-base style configuration
>>> model = FNetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
pass
| 2
| 1
| 33
| 1
| 32
| 0
| 1
| 1.41
| 1
| 1
| 0
| 0
| 1
| 12
| 1
| 1
| 93
| 11
| 34
| 33
| 14
| 48
| 16
| 15
| 14
| 1
| 1
| 0
| 1
|
2,570
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetBasicFourierTransform
|
from ...utils import logging
from ...utils import auto_docstring, is_scipy_available
from functools import partial
import torch
from torch import nn
if is_scipy_available():
    from scipy import linalg  # supplies linalg.dft, used below to build the DFT matrices
class FNetBasicFourierTransform(nn.Module):
def __init__(self, config):
super().__init__()
self._init_fourier_transform(config)
def _init_fourier_transform(self, config):
if not config.use_tpu_fourier_optimizations:
self.fourier_transform = partial(torch.fft.fftn, dim=(1, 2))
elif config.max_position_embeddings <= 4096:
if is_scipy_available():
self.register_buffer('dft_mat_hidden', torch.tensor(linalg.dft(config.hidden_size), dtype=torch.complex64))
self.register_buffer('dft_mat_seq', torch.tensor(linalg.dft(config.tpu_short_seq_length), dtype=torch.complex64))
self.fourier_transform = partial(two_dim_matmul, matrix_dim_one=self.dft_mat_seq, matrix_dim_two=self.dft_mat_hidden)
else:
logging.warning('SciPy is needed for DFT matrix calculation and is not found. Using TPU optimized fast fourier transform instead.')
self.fourier_transform = fftn
else:
self.fourier_transform = fftn
def forward(self, hidden_states):
outputs = self.fourier_transform(hidden_states).real
return (outputs,)
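The block above also references `fftn` and `two_dim_matmul`, module-level helpers in modeling_fnet.py that this extraction omitted. A sketch of plausible implementations, in the spirit of the google-research f_net reference code (treat the exact signatures as assumptions):
import torch

def fftn(x):
    # Apply a 1D FFT along every axis except the batch axis, equivalent to
    # an n-dimensional FFT over the sequence and hidden dimensions.
    out = x
    for axis in reversed(range(x.ndim)[1:]):
        out = torch.fft.fft(out, dim=axis)
    return out

def two_dim_matmul(x, matrix_dim_one, matrix_dim_two):
    # 2D DFT expressed as matrix multiplication: matrix_dim_one mixes the
    # sequence dimension, matrix_dim_two mixes the hidden dimension.
    seq_length = x.shape[1]
    matrix_dim_one = matrix_dim_one[:seq_length, :seq_length]
    x = x.type(torch.complex64)
    return torch.einsum("bij,jk,ni->bnk", x, matrix_dim_two, matrix_dim_one)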
|
class FNetBasicFourierTransform(nn.Module):
def __init__(self, config):
pass
def _init_fourier_transform(self, config):
pass
def forward(self, hidden_states):
pass
| 4
| 0
| 11
| 0
| 9
| 1
| 2
| 0.14
| 1
| 2
| 0
| 0
| 3
| 2
| 3
| 13
| 36
| 3
| 29
| 7
| 25
| 4
| 17
| 6
| 13
| 4
| 1
| 2
| 6
|
2,571
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetBasicOutput
|
from torch import nn
class FNetBasicOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, input_tensor):
hidden_states = self.LayerNorm(input_tensor + hidden_states)
return hidden_states
|
class FNetBasicOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, input_tensor):
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 4
| 4
| 0
| 7
| 4
| 4
| 1
| 1
| 0
| 2
|
2,572
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetEmbeddings
|
from torch import nn
import torch
class FNetEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.projection = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
if hasattr(self, 'token_type_ids'):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.projection(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
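A minimal shape check for the embedding stack above (randomly initialized, default config; values are illustrative only):
import torch
from transformers import FNetConfig
from transformers.models.fnet.modeling_fnet import FNetEmbeddings

config = FNetConfig()
embeddings = FNetEmbeddings(config)
input_ids = torch.randint(0, config.vocab_size, (2, 16))  # (batch, seq)
out = embeddings(input_ids=input_ids)
assert out.shape == (2, 16, config.hidden_size)  # word + position + type, projected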
|
class FNetEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
pass
| 3
| 1
| 28
| 5
| 20
| 4
| 4
| 0.2
| 1
| 1
| 0
| 0
| 2
| 6
| 2
| 12
| 59
| 11
| 40
| 16
| 37
| 8
| 34
| 16
| 31
| 6
| 1
| 2
| 7
|
2,573
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetEncoder
|
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
class FNetEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([FNetLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states, output_hidden_states=False, return_dict=True):
all_hidden_states = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states)
hidden_states = layer_outputs[0]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
|
class FNetEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, output_hidden_states=False, return_dict=True):
pass
| 3
| 0
| 13
| 3
| 10
| 0
| 4
| 0
| 1
| 6
| 2
| 0
| 2
| 3
| 2
| 12
| 28
| 7
| 21
| 9
| 18
| 0
| 20
| 9
| 17
| 7
| 1
| 2
| 8
|
2,574
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetForMaskedLM
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Optional, Union
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import auto_docstring, is_scipy_available
@auto_docstring
class FNetForMaskedLM(FNetPreTrainedModel):
_tied_weights_keys = ['cls.predictions.decoder.bias', 'cls.predictions.decoder.weight']
def __init__(self, config):
super().__init__(config)
self.fnet = FNetModel(config)
self.cls = FNetOnlyMLMHead(config)
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states)
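This head's docstring carries no usage example, unlike the NSP and pretraining heads below; a hedged sketch following the standard transformers fill-mask pattern (checkpoint name as used elsewhere in this file):
import torch
from transformers import AutoTokenizer, FNetForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
model = FNetForMaskedLM.from_pretrained("google/fnet-base")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Pick the highest-scoring vocabulary token at the [MASK] position.
mask_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_id = logits[0, mask_index].argmax(dim=-1)
print(tokenizer.decode(predicted_id))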
|
@auto_docstring
class FNetForMaskedLM(FNetPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
'''
pass
| 7
| 1
| 13
| 2
| 10
| 2
| 2
| 0.17
| 1
| 6
| 3
| 0
| 4
| 2
| 4
| 5
| 65
| 11
| 47
| 24
| 27
| 8
| 25
| 14
| 20
| 5
| 2
| 1
| 8
|
2,575
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetForMultipleChoice
|
from ...utils import auto_docstring, is_scipy_available
from typing import Optional, Union
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@auto_docstring
class FNetForMultipleChoice(FNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.fnet = FNetModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states)
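The forward above flattens `(batch, num_choices, seq)` inputs to `(batch * num_choices, seq)` and reshapes the logits back, so each choice is encoded as a (prompt, choice) pair; a hedged sketch of the expected input layout (choice texts are made up):
import torch
from transformers import AutoTokenizer, FNetForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
model = FNetForMultipleChoice.from_pretrained("google/fnet-base")

prompt = "The glass fell off the table,"
choices = ["so it broke.", "so it flew away."]

# Encode one (prompt, choice) pair per choice, then add the num_choices axis.
enc = tokenizer([prompt] * len(choices), choices, return_tensors="pt", padding=True)
inputs = {k: v.unsqueeze(0) for k, v in enc.items()}  # each tensor: (1, num_choices, seq)

with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_choices)
print(logits.argmax(dim=-1))  # index of the highest-scoring choice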
|
@auto_docstring
class FNetForMultipleChoice(FNetPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
'''
pass
| 5
| 1
| 31
| 5
| 23
| 4
| 6
| 0.13
| 1
| 5
| 2
| 0
| 2
| 3
| 2
| 3
| 70
| 10
| 53
| 24
| 35
| 7
| 27
| 14
| 24
| 10
| 2
| 1
| 11
|
2,576
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetForNextSentencePrediction
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils import auto_docstring, is_scipy_available
from typing import Optional, Union
import torch
import warnings
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
@auto_docstring(custom_intro='\n FNet Model with a `next sentence prediction (classification)` head on top.\n ')
class FNetForNextSentencePrediction(FNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.fnet = FNetModel(config)
self.cls = FNetOnlyNSPHead(config)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, NextSentencePredictorOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring). Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Example:
```python
>>> from transformers import AutoTokenizer, FNetForNextSentencePrediction
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
>>> model = FNetForNextSentencePrediction.from_pretrained("google/fnet-base")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
```"""
if 'next_sentence_label' in kwargs:
warnings.warn('The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.', FutureWarning)
labels = kwargs.pop('next_sentence_label')
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
if not return_dict:
output = (seq_relationship_scores,) + outputs[2:]
return (next_sentence_loss,) + output if next_sentence_loss is not None else output
return NextSentencePredictorOutput(loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states)
|
@auto_docstring(custom_intro='\n FNet Model with a `next sentence prediction (classification)` head on top.\n ')
class FNetForNextSentencePrediction(FNetPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, NextSentencePredictorOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring). Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Example:
```python
>>> from transformers import AutoTokenizer, FNetForNextSentencePrediction
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
>>> model = FNetForNextSentencePrediction.from_pretrained("google/fnet-base")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
```'''
pass
| 5
| 1
| 41
| 8
| 23
| 11
| 4
| 0.43
| 1
| 7
| 3
| 0
| 2
| 2
| 2
| 3
| 86
| 16
| 49
| 22
| 34
| 21
| 22
| 11
| 19
| 6
| 2
| 1
| 7
|
2,577
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetForPreTraining
|
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils import auto_docstring, is_scipy_available
from typing import Optional, Union
@auto_docstring(custom_intro='\n FNet Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n ')
class FNetForPreTraining(FNetPreTrainedModel):
_tied_weights_keys = ['cls.predictions.decoder.bias', 'cls.predictions.decoder.weight']
def __init__(self, config):
super().__init__(config)
self.fnet = FNetModel(config)
self.cls = FNetPreTrainingHeads(config)
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, next_sentence_label: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, FNetForPreTrainingOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see the `input_ids` docstring). Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Example:
```python
>>> from transformers import AutoTokenizer, FNetForPreTraining
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
>>> model = FNetForPreTraining.from_pretrained("google/fnet-base")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return (total_loss,) + output if total_loss is not None else output
return FNetForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states)
|
@auto_docstring(custom_intro='\n FNet Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next\n sentence prediction (classification)` head.\n ')
class FNetForPreTraining(FNetPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, next_sentence_label: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, FNetForPreTrainingOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see the `input_ids` docstring). Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Example:
```python
>>> from transformers import AutoTokenizer, FNetForPreTraining
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
>>> model = FNetForPreTraining.from_pretrained("google/fnet-base")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
```'''
pass
| 7
| 1
| 21
| 3
| 12
| 6
| 2
| 0.49
| 1
| 6
| 3
| 0
| 4
| 2
| 4
| 5
| 92
| 16
| 51
| 27
| 34
| 25
| 27
| 16
| 22
| 5
| 2
| 1
| 8
|
2,578
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetForPreTrainingOutput
|
from dataclasses import dataclass
import torch
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import auto_docstring, is_scipy_available
@dataclass
@auto_docstring(custom_intro='\n Output type of [`FNetForPreTraining`].\n ')
class FNetForPreTrainingOutput(ModelOutput):
"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: Optional[torch.FloatTensor] = None
seq_relationship_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`FNetForPreTraining`].\n ')
class FNetForPreTrainingOutput(ModelOutput):
'''
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 2
| 5
| 5
| 4
| 16
| 5
| 5
| 4
| 0
| 1
| 0
| 0
|
2,579
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetForQuestionAnswering
|
from ...utils import auto_docstring, is_scipy_available
from torch import nn
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
@auto_docstring
class FNetForQuestionAnswering(FNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.fnet = FNetModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states)
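A hedged extractive-QA sketch for the head above (the base checkpoint's QA head is randomly initialized, so sensible spans require a fine-tuned checkpoint; texts are made up):
import torch
from transformers import AutoTokenizer, FNetForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
model = FNetForQuestionAnswering.from_pretrained("google/fnet-base")

question = "Who proposed FNet?"
context = "FNet was proposed by James Lee-Thorp and colleagues at Google."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Decode the highest-scoring answer span from the start/end logits.
start = outputs.start_logits.argmax()
end = outputs.end_logits.argmax()
print(tokenizer.decode(inputs.input_ids[0, start : end + 1]))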
|
@auto_docstring
class FNetForQuestionAnswering(FNetPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
pass
| 5
| 0
| 37
| 5
| 25
| 7
| 4
| 0.23
| 1
| 5
| 2
| 0
| 2
| 3
| 2
| 3
| 81
| 11
| 57
| 27
| 38
| 13
| 32
| 16
| 29
| 7
| 2
| 2
| 8
|
2,580
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetForSequenceClassification
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Optional, Union
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from ...utils import auto_docstring, is_scipy_available
@auto_docstring(custom_intro='\n FNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ')
class FNetForSequenceClassification(FNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.fnet = FNetModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
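The loss branch above selects the objective from `config.problem_type`; a hedged sketch of two of the three settings with freshly initialized models (illustrative values only):
import torch
from transformers import FNetConfig, FNetForSequenceClassification

input_ids = torch.randint(0, 32000, (2, 8))  # (batch, seq)

# num_labels == 1 with float labels -> problem_type "regression" -> MSELoss
model = FNetForSequenceClassification(FNetConfig(num_labels=1))
out = model(input_ids=input_ids, labels=torch.tensor([0.5, 1.2]))
print(out.loss)

# num_labels > 1 with integer labels -> "single_label_classification" -> CrossEntropyLoss
model = FNetForSequenceClassification(FNetConfig(num_labels=3))
out = model(input_ids=input_ids, labels=torch.tensor([0, 2]))
print(out.loss)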
|
@auto_docstring(custom_intro='\n FNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled\n output) e.g. for GLUE tasks.\n ')
class FNetForSequenceClassification(FNetPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5
| 1
| 34
| 4
| 27
| 4
| 7
| 0.11
| 1
| 6
| 2
| 0
| 2
| 4
| 2
| 3
| 76
| 8
| 61
| 23
| 43
| 7
| 34
| 13
| 31
| 12
| 2
| 3
| 13
|
2,581
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetForTokenClassification
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from ...utils import auto_docstring, is_scipy_available
from torch import nn
@auto_docstring
class FNetForTokenClassification(FNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.fnet = FNetModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.fnet(input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
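A hedged per-token decoding sketch for the head above (the classification layer is randomly initialized until fine-tuned; `num_labels=5` is an arbitrary example value):
import torch
from transformers import AutoTokenizer, FNetForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
model = FNetForTokenClassification.from_pretrained("google/fnet-base", num_labels=5)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, seq, num_labels)
predictions = logits.argmax(dim=-1)  # one class id per token position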
|
@auto_docstring
class FNetForTokenClassification(FNetPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass
| 5
| 1
| 26
| 5
| 19
| 3
| 3
| 0.14
| 1
| 5
| 2
| 0
| 2
| 4
| 2
| 3
| 60
| 10
| 44
| 23
| 26
| 6
| 22
| 13
| 19
| 5
| 2
| 1
| 6
|
2,582
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetFourierTransform
|
from torch import nn
class FNetFourierTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.self = FNetBasicFourierTransform(config)
self.output = FNetBasicOutput(config)
def forward(self, hidden_states):
self_outputs = self.self(hidden_states)
fourier_output = self.output(self_outputs[0], hidden_states)
outputs = (fourier_output,)
return outputs
|
class FNetFourierTransform(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 3
| 2
| 0
| 2
| 2
| 2
| 12
| 11
| 1
| 10
| 8
| 7
| 0
| 10
| 8
| 7
| 1
| 1
| 0
| 2
|
2,583
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetIntermediate
|
from torch import nn
from ...activations import ACT2FN
import torch
class FNetIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class FNetIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
2,584
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetLMPredictionHead
|
from torch import nn
import torch
class FNetLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = FNetPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
def _tie_weights(self) -> None:
if self.decoder.bias.device.type == 'meta':
self.decoder.bias = self.bias
else:
self.bias = self.decoder.bias
|
class FNetLMPredictionHead(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
def _tie_weights(self) -> None:
pass
| 4
| 0
| 7
| 1
| 5
| 1
| 1
| 0.25
| 1
| 2
| 1
| 0
| 3
| 3
| 3
| 13
| 24
| 4
| 16
| 7
| 12
| 4
| 15
| 7
| 11
| 2
| 1
| 1
| 4
|
2,585
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetLayer
|
from ...modeling_layers import GradientCheckpointingLayer
from ...pytorch_utils import apply_chunking_to_forward
class FNetLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.fourier = FNetFourierTransform(config)
self.intermediate = FNetIntermediate(config)
self.output = FNetOutput(config)
def forward(self, hidden_states):
self_fourier_outputs = self.fourier(hidden_states)
fourier_output = self_fourier_outputs[0]
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, fourier_output)
outputs = (layer_output,)
return outputs
def feed_forward_chunk(self, fourier_output):
intermediate_output = self.intermediate(fourier_output)
layer_output = self.output(intermediate_output, fourier_output)
return layer_output
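`apply_chunking_to_forward` trades peak memory for sequential compute by slicing the sequence dimension before the feed-forward; a sketch of the equivalent manual loop (behavior inferred from the utility's name and arguments, so treat it as an assumption):
import torch

def chunked_feed_forward(forward_fn, chunk_size, chunk_dim, x):
    # Run forward_fn on slices along chunk_dim and concatenate the results;
    # with chunk_size 0 (the default config value) nothing is chunked.
    if chunk_size == 0:
        return forward_fn(x)
    chunks = x.split(chunk_size, dim=chunk_dim)
    return torch.cat([forward_fn(chunk) for chunk in chunks], dim=chunk_dim)

ff = torch.nn.Linear(8, 8)          # stand-in for intermediate + output
x = torch.randn(2, 16, 8)           # (batch, seq, hidden)
out = chunked_feed_forward(ff, chunk_size=4, chunk_dim=1, x=x)
assert torch.allclose(out, ff(x), atol=1e-6)  # same result as the unchunked pass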
|
class FNetLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
def feed_forward_chunk(self, fourier_output):
pass
| 4
| 0
| 7
| 1
| 6
| 0
| 1
| 0.05
| 1
| 4
| 3
| 0
| 3
| 5
| 3
| 13
| 25
| 5
| 20
| 15
| 16
| 1
| 18
| 15
| 14
| 1
| 1
| 0
| 3
|
2,586
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetModel
|
import torch
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, ModelOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
from ...utils import auto_docstring, is_scipy_available
@auto_docstring
class FNetModel(FNetPreTrainedModel):
"""
The model can behave as an encoder, following the architecture described in [FNet: Mixing Tokens with Fourier
Transforms](https://huggingface.co/papers/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
"""
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = FNetEmbeddings(config)
self.encoder = FNetEncoder(config)
self.pooler = FNetPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
if self.config.use_tpu_fourier_optimizations and seq_length <= 4096 and (self.config.tpu_short_seq_length != seq_length):
raise ValueError('The `tpu_short_seq_length` in FNetConfig should be set equal to the sequence length being passed to the model when using TPU optimizations.')
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is None:
if hasattr(self.embeddings, 'token_type_ids'):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooler_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooler_output, hidden_states=encoder_outputs.hidden_states)
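An end-to-end usage sketch for the bare encoder above (standard transformers pattern; mirrors the pretraining example earlier in this file):
import torch
from transformers import AutoTokenizer, FNetModel

tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
model = FNetModel.from_pretrained("google/fnet-base")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (1, seq_len, hidden_size)
print(outputs.pooler_output.shape)      # (1, hidden_size)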
|
@auto_docstring
class FNetModel(FNetPreTrainedModel):
'''
The model can behave as an encoder, following the architecture described in [FNet: Mixing Tokens with Fourier
Transforms](https://huggingface.co/papers/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
'''
def __init__(self, config, add_pooling_layer=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
pass
| 7
| 2
| 21
| 3
| 18
| 0
| 4
| 0.06
| 1
| 9
| 5
| 0
| 4
| 4
| 4
| 5
| 100
| 17
| 78
| 27
| 59
| 5
| 38
| 18
| 33
| 12
| 2
| 2
| 16
|
2,587
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetOnlyMLMHead
|
from torch import nn
class FNetOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = FNetLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
|
class FNetOnlyMLMHead(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_output):
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 2
| 1
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 5
| 4
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
2,588
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetOnlyNSPHead
|
from torch import nn
class FNetOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
|
class FNetOnlyNSPHead(nn.Module):
def __init__(self, config):
pass
def forward(self, pooled_output):
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 5
| 4
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
2,589
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetOutput
|
import torch
from torch import nn
class FNetOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class FNetOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
2,590
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetPooler
|
import torch
from torch import nn
class FNetPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class FNetPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 10
| 7
| 7
| 2
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
2,591
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetPreTrainedModel
|
from ...utils import auto_docstring, is_scipy_available
from .configuration_fnet import FNetConfig
from torch import nn
from ...modeling_utils import PreTrainedModel
@auto_docstring
class FNetPreTrainedModel(PreTrainedModel):
config: FNetConfig
base_model_prefix = 'fnet'
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class FNetPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 16
| 0
| 12
| 4
| 6
| 0.5
| 1
| 0
| 0
| 8
| 1
| 0
| 1
| 1
| 26
| 2
| 16
| 5
| 14
| 8
| 14
| 5
| 12
| 6
| 1
| 2
| 6
|
2,592
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetPreTrainingHeads
|
from torch import nn
class FNetPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = FNetLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return (prediction_scores, seq_relationship_score)
|
class FNetPreTrainingHeads(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_output, pooled_output):
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 2
| 1
| 0
| 2
| 2
| 2
| 12
| 10
| 1
| 9
| 7
| 6
| 0
| 9
| 7
| 6
| 1
| 1
| 0
| 2
|
2,593
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/modeling_fnet.py
|
transformers.models.fnet.modeling_fnet.FNetPredictionHeadTransform
|
from torch import nn
from ...activations import ACT2FN
import torch
class FNetPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
|
class FNetPredictionHeadTransform(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 3
| 2
| 12
| 15
| 1
| 14
| 6
| 11
| 0
| 13
| 6
| 10
| 2
| 1
| 1
| 3
|
2,594
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/tokenization_fnet.py
|
transformers.models.fnet.tokenization_fnet.FNetTokenizer
|
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
import unicodedata
from ...utils.import_utils import requires
import os
from shutil import copyfile
import sentencepiece as spm
from typing import Any, Optional
@requires(backends=('sentencepiece',))
class FNetTokenizer(PreTrainedTokenizer):
"""
Construct an FNet tokenizer. Adapted from [`AlbertTokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`]
which contains most of the main methods. Users should refer to this superclass for more information regarding those
methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
do_lower_case (`bool`, *optional*, defaults to `False`):
Whether or not to lowercase the input when tokenizing.
remove_space (`bool`, *optional*, defaults to `True`):
Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
keep_accents (`bool`, *optional*, defaults to `True`):
Whether or not to keep accents when tokenizing.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'token_type_ids']
def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
@property
def vocab_size(self):
return len(self.sp_model)
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self):
state = self.__dict__.copy()
state['sp_model'] = None
return state
def __setstate__(self, d):
self.__dict__ = d
if not hasattr(self, 'sp_model_kwargs'):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def preprocess_text(self, inputs):
if self.remove_space:
outputs = ' '.join(inputs.strip().split())
else:
outputs = inputs
outputs = outputs.replace('``', '"').replace("''", '"')
if not self.keep_accents:
outputs = unicodedata.normalize('NFKD', outputs)
outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
if self.do_lower_case:
outputs = outputs.lower()
return outputs
def _tokenize(self, text: str) -> list[str]:
"""Tokenize a string."""
text = self.preprocess_text(text)
pieces = self.sp_model.encode(text, out_type=str)
new_pieces = []
for piece in pieces:
if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit():
cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
return new_pieces
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.sp_model.PieceToId(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.sp_model.IdToPiece(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
current_sub_tokens = []
out_string = ''
prev_is_special = False
for token in tokens:
if token in self.all_special_tokens:
if not prev_is_special:
out_string += ' '
out_string += self.sp_model.decode(current_sub_tokens) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token)
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
def _decode(self, token_ids: list[int], skip_special_tokens: bool=False, clean_up_tokenization_spaces: Optional[bool]=None, spaces_between_special_tokens: bool=False, **kwargs) -> str:
text = super()._decode(token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, spaces_between_special_tokens=spaces_between_special_tokens, **kwargs)
if not spaces_between_special_tokens:
text = text.replace('<unk> ', '<unk>')
return text
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. An FNet sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is not None:
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
return [1] + [0] * len(token_ids_0) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
|
@requires(backends=('sentencepiece',))
class FNetTokenizer(PreTrainedTokenizer):
'''
Construct an FNet tokenizer. Adapted from [`AlbertTokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`]
which contains most of the main methods. Users should refer to this superclass for more information regarding those
methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
do_lower_case (`bool`, *optional*, defaults to `False`):
Whether or not to lowercase the input when tokenizing.
remove_space (`bool`, *optional*, defaults to `True`):
Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
keep_accents (`bool`, *optional*, defaults to `True`):
Whether or not to keep accents when tokenizing.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
'''
def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
def preprocess_text(self, inputs):
pass
def _tokenize(self, text: str) -> list[str]:
'''Tokenize a string.'''
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) into an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) into a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (strings) into a single string.'''
pass
def _decode(self, token_ids: list[int], skip_special_tokens: bool=False, clean_up_tokenization_spaces: Optional[bool]=None, spaces_between_special_tokens: bool=False, **kwargs) -> str:
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. An FNet sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 17 | 7 | 16 | 1 | 11 | 3 | 3 | 0.6 | 1 | 6 | 0 | 0 | 15 | 7 | 15 | 104 | 304 | 42 | 164 | 71 | 121 | 98 | 111 | 43 | 95 | 6 | 3 | 4 | 40
|
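The special-token layout and mask documented in `build_inputs_with_special_tokens` and `get_special_tokens_mask` can be checked in pure Python with hypothetical ids (no SentencePiece model required).

```python
cls, sep = [101], [102]  # hypothetical [CLS] and [SEP] ids
token_ids_0, token_ids_1 = [7, 8, 9], [11, 12]

single = cls + token_ids_0 + sep                    # [CLS] X [SEP]
pair = cls + token_ids_0 + sep + token_ids_1 + sep  # [CLS] A [SEP] B [SEP]

mask = [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
assert len(mask) == len(pair)  # a 1 marks each special-token position
```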
2,595
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/fnet/tokenization_fnet_fast.py
|
transformers.models.fnet.tokenization_fnet_fast.FNetTokenizerFast
|
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from shutil import copyfile
import os
from typing import Optional
class FNetTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" FNetTokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
[`AlbertTokenizerFast`]. Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This
tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
do_lower_case (`bool`, *optional*, defaults to `False`):
Whether or not to lowercase the input when tokenizing.
remove_space (`bool`, *optional*, defaults to `True`):
Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
keep_accents (`bool`, *optional*, defaults to `True`):
Whether or not to keep accents when tokenizing.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'token_type_ids']
slow_tokenizer_class = FNetTokenizer
def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. An FNet sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
|
class FNetTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" FNetTokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
[`AlbertTokenizerFast`]. Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This
tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
do_lower_case (`bool`, *optional*, defaults to `False`):
Whether or not to lowercase the input when tokenizing.
remove_space (`bool`, *optional*, defaults to `True`):
Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
keep_accents (`bool`, *optional*, defaults to `True`):
Whether or not to keep accents when tokenizing.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
'''
def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. An FNet sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 4 | 2 | 21 | 2 | 12 | 6 | 3 | 0.94 | 1 | 4 | 0 | 0 | 5 | 4 | 5 | 93 | 149 | 19 | 67 | 36 | 43 | 63 | 35 | 18 | 29 | 4 | 3 | 1 | 14
|
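A usage sketch for the fast tokenizer, assuming network access to the `google/fnet-base` checkpoint on the Hub.

```python
from transformers import FNetTokenizerFast

tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
enc = tokenizer("Hello world", "Second segment")
print(enc["input_ids"])       # [CLS] A [SEP] B [SEP] layout described above
print(enc["token_type_ids"])  # segment ids distinguishing the two sequences
```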
2,596
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/focalnet/configuration_focalnet.py
|
transformers.models.focalnet.configuration_focalnet.FocalNetConfig
|
from ...configuration_utils import PretrainedConfig
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`FocalNetModel`]. It is used to instantiate a
FocalNet model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the FocalNet
[microsoft/focalnet-tiny](https://huggingface.co/microsoft/focalnet-tiny) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch in the embeddings layer.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
use_conv_embed (`bool`, *optional*, defaults to `False`):
Whether to use convolutional embedding. The authors noted that using convolutional embedding usually
improves performance, but it is not used by default.
hidden_sizes (`list[int]`, *optional*, defaults to `[192, 384, 768, 768]`):
Dimensionality (hidden size) at each stage.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth (number of layers) of each stage in the encoder.
focal_levels (`list(int)`, *optional*, defaults to `[2, 2, 2, 2]`):
Number of focal levels in each layer of the respective stages in the encoder.
focal_windows (`list(int)`, *optional*, defaults to `[3, 3, 3, 3]`):
Focal window size in each layer of the respective stages in the encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
use_layerscale (`bool`, *optional*, defaults to `False`):
Whether to use layer scale in the encoder.
layerscale_value (`float`, *optional*, defaults to 0.0001):
The initial value of the layer scale.
use_post_layernorm (`bool`, *optional*, defaults to `False`):
Whether to use post layer normalization in the encoder.
use_post_layernorm_in_modulation (`bool`, *optional*, defaults to `False`):
Whether to use post layer normalization in the modulation layer.
normalize_modulator (`bool`, *optional*, defaults to `False`):
Whether to normalize the modulator.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
encoder_stride (`int`, *optional*, defaults to 32):
Factor to increase the spatial resolution by in the decoder head for masked image modeling.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import FocalNetConfig, FocalNetModel
>>> # Initializing a FocalNet microsoft/focalnet-tiny style configuration
>>> configuration = FocalNetConfig()
>>> # Initializing a model (with random weights) from the microsoft/focalnet-tiny style configuration
>>> model = FocalNetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'focalnet'
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act='gelu', mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=0.0001, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-05, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.use_conv_embed = use_conv_embed
self.hidden_sizes = hidden_sizes
self.depths = depths
self.focal_levels = focal_levels
self.focal_windows = focal_windows
self.hidden_act = hidden_act
self.mlp_ratio = mlp_ratio
self.hidden_dropout_prob = hidden_dropout_prob
self.drop_path_rate = drop_path_rate
self.use_layerscale = use_layerscale
self.layerscale_value = layerscale_value
self.use_post_layernorm = use_post_layernorm
self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
self.normalize_modulator = normalize_modulator
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.encoder_stride = encoder_stride
self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
|
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`FocalNetModel`]. It is used to instantiate a
FocalNet model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the FocalNet
[microsoft/focalnet-tiny](https://huggingface.co/microsoft/focalnet-tiny) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch in the embeddings layer.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
use_conv_embed (`bool`, *optional*, defaults to `False`):
Whether to use convolutional embedding. The authors noted that using convolutional embedding usually
improves performance, but it is not used by default.
hidden_sizes (`list[int]`, *optional*, defaults to `[192, 384, 768, 768]`):
Dimensionality (hidden size) at each stage.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth (number of layers) of each stage in the encoder.
focal_levels (`list(int)`, *optional*, defaults to `[2, 2, 2, 2]`):
Number of focal levels in each layer of the respective stages in the encoder.
focal_windows (`list(int)`, *optional*, defaults to `[3, 3, 3, 3]`):
Focal window size in each layer of the respective stages in the encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
use_layerscale (`bool`, *optional*, defaults to `False`):
Whether to use layer scale in the encoder.
layerscale_value (`float`, *optional*, defaults to 0.0001):
The initial value of the layer scale.
use_post_layernorm (`bool`, *optional*, defaults to `False`):
Whether to use post layer normalization in the encoder.
use_post_layernorm_in_modulation (`bool`, *optional*, defaults to `False`):
Whether to use post layer normalization in the modulation layer.
normalize_modulator (`bool`, *optional*, defaults to `False`):
Whether to normalize the modulator.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
encoder_stride (`int`, *optional*, defaults to 32):
Factor to increase the spatial resolution by in the decoder head for masked image modeling.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import FocalNetConfig, FocalNetModel
>>> # Initializing a FocalNet microsoft/focalnet-tiny style configuration
>>> configuration = FocalNetConfig()
>>> # Initializing a model (with random weights) from the microsoft/focalnet-tiny style configuration
>>> model = FocalNetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act='gelu', mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=0.0001, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-05, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
pass
| 2 | 1 | 54 | 1 | 53 | 0 | 1 | 1.31 | 2 | 2 | 0 | 0 | 1 | 24 | 1 | 6 | 137 | 10 | 55 | 52 | 27 | 72 | 27 | 26 | 25 | 1 | 1 | 0 | 1
|
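A sketch of the backbone-related arguments: `stage_names` is derived from `depths` in `__init__`, so a four-stage config exposes `"stem"` plus `"stage1"` through `"stage4"`.

```python
from transformers import FocalNetConfig

config = FocalNetConfig(depths=[2, 2, 6, 2], out_features=["stage2", "stage4"])
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_indices)  # aligned with out_features; expected [2, 4]
```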
2,597
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/focalnet/modeling_focalnet.py
|
transformers.models.focalnet.modeling_focalnet.FocalNetBackbone
|
import torch
from ...utils import ModelOutput, auto_docstring, logging
from ...modeling_outputs import BackboneOutput
from .configuration_focalnet import FocalNetConfig
from typing import Optional, Union
from ...utils.backbone_utils import BackboneMixin
@auto_docstring(custom_intro='\n FocalNet backbone, to be used with frameworks like X-Decoder.\n ')
class FocalNetBackbone(FocalNetPreTrainedModel, BackboneMixin):
has_attentions = False
def __init__(self, config: FocalNetConfig):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.embed_dim] + config.hidden_sizes
self.focalnet = FocalNetModel(config)
self.post_init()
@auto_docstring
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny-lrf")
>>> model = AutoBackbone.from_pretrained("microsoft/focalnet-tiny-lrf")
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
outputs = self.focalnet(pixel_values, output_hidden_states=True, return_dict=True)
hidden_states = outputs.reshaped_hidden_states
feature_maps = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None)
|
@auto_docstring(custom_intro='\n FocalNet backbone, to be used with frameworks like X-Decoder.\n ')
class FocalNetBackbone(FocalNetPreTrainedModel, BackboneMixin):
def __init__(self, config: FocalNetConfig):
pass
@auto_docstring
def forward(self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:
'''
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny-lrf")
>>> model = AutoBackbone.from_pretrained("microsoft/focalnet-tiny-lrf")
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
```'''
pass
| 5 | 1 | 30 | 6 | 16 | 8 | 5 | 0.46 | 2 | 7 | 3 | 0 | 2 | 2 | 2 | 15 | 64 | 13 | 35 | 16 | 25 | 16 | 22 | 10 | 19 | 8 | 2 | 2 | 9
|
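A randomly initialized run of the backbone (no checkpoint download), just to surface one feature map per requested stage.

```python
import torch
from transformers import FocalNetConfig, FocalNetBackbone

config = FocalNetConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
model = FocalNetBackbone(config)
pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
with torch.no_grad():
    outputs = model(pixel_values)
for fmap in outputs.feature_maps:
    print(fmap.shape)  # (batch, channels, height, width), one map per stage
```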
2,598
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/focalnet/modeling_focalnet.py
|
transformers.models.focalnet.modeling_focalnet.FocalNetDropPath
|
from torch import nn
import torch
from typing import Optional, Union
class FocalNetDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
|
class FocalNetDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| 4 | 1 | 2 | 0 | 2 | 0 | 1 | 0.13 | 1 | 4 | 0 | 0 | 3 | 1 | 3 | 13 | 12 | 3 | 8 | 5 | 4 | 1 | 8 | 5 | 4 | 1 | 1 | 0 | 3
|
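A minimal stand-in for the `drop_path` helper the module wraps, mirroring the standard stochastic-depth implementation (the actual helper is defined elsewhere in the file).

```python
import torch

def drop_path(x, drop_prob=0.0, training=False):
    # Each sample survives with probability (1 - drop_prob) during training
    # and is rescaled so the expected value is unchanged; eval is identity.
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # broadcast over all but batch
    mask = x.new_empty(shape).bernoulli_(keep_prob)
    return x.div(keep_prob) * mask

x = torch.ones(4, 3)
print(drop_path(x, drop_prob=0.5, training=True))   # some rows zeroed, others scaled by 2
print(drop_path(x, drop_prob=0.5, training=False))  # identity at eval time
```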
2,599
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/focalnet/modeling_focalnet.py
|
transformers.models.focalnet.modeling_focalnet.FocalNetEmbeddings
|
from typing import Optional, Union
import torch
from torch import nn
class FocalNetEmbeddings(nn.Module):
"""
Construct the patch embeddings and layernorm. Optionally, also the mask token.
"""
def __init__(self, config, use_mask_token=False):
super().__init__()
self.patch_embeddings = FocalNetPatchEmbeddings(config=config, image_size=config.image_size, patch_size=config.patch_size, num_channels=config.num_channels, embed_dim=config.embed_dim, use_conv_embed=config.use_conv_embed, is_stem=True)
self.patch_grid = self.patch_embeddings.grid_size
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
self.norm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor]=None) -> tuple[torch.Tensor]:
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
embeddings = self.norm(embeddings)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
embeddings = self.dropout(embeddings)
return (embeddings, output_dimensions)
|
class FocalNetEmbeddings(nn.Module):
'''
Construct the patch embeddings and layernorm. Optionally, also the mask token.
'''
def __init__(self, config, use_mask_token=False):
pass
def forward(self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor]=None) -> tuple[torch.Tensor]:
pass
| 3 | 1 | 16 | 2 | 14 | 1 | 2 | 0.14 | 1 | 3 | 1 | 0 | 2 | 5 | 2 | 12 | 38 | 6 | 28 | 14 | 23 | 4 | 18 | 12 | 15 | 2 | 1 | 1 | 4
|
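A standalone illustration of the `bool_masked_pos` branch above with toy dimensions; in the module, `mask_token` is a learned `nn.Parameter` rather than the zero tensor used here.

```python
import torch

batch_size, seq_len, dim = 2, 4, 8
embeddings = torch.randn(batch_size, seq_len, dim)
mask_token = torch.zeros(1, 1, dim)  # learned parameter in the real module
bool_masked_pos = torch.tensor([[1, 0, 1, 0], [0, 0, 1, 1]], dtype=torch.bool)

mask_tokens = mask_token.expand(batch_size, seq_len, -1)
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
print(embeddings[0, 0])  # all zeros: this position was masked
```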