| Column | Dtype | Range (min to max) |
|---|---|---|
| id | int64 | 0 to 328k |
| repository_name | string | lengths 7 to 58 |
| file_path | string | lengths 9 to 302 |
| class_name | string | lengths 5 to 256 |
| human_written_code | string | lengths 16 to 2.16M |
| class_skeleton | string (nullable) | lengths 18 to 1.49M |
| total_program_units | int64 | 1 to 1.76k |
| total_doc_str | int64 | 0 to 771 |
| AvgCountLine | float64 | 0 to 7.89k |
| AvgCountLineBlank | float64 | 0 to 297 |
| AvgCountLineCode | float64 | 0 to 7.89k |
| AvgCountLineComment | float64 | 0 to 7.89k |
| AvgCyclomatic | float64 | 0 to 130 |
| CommentToCodeRatio | float64 | 0 to 168 |
| CountClassBase | float64 | 0 to 40 |
| CountClassCoupled | float64 | 0 to 583 |
| CountClassCoupledModified | float64 | 0 to 575 |
| CountClassDerived | float64 | 0 to 5.35k |
| CountDeclInstanceMethod | float64 | 0 to 529 |
| CountDeclInstanceVariable | float64 | 0 to 296 |
| CountDeclMethod | float64 | 0 to 599 |
| CountDeclMethodAll | float64 | 0 to 1.12k |
| CountLine | float64 | 1 to 40.4k |
| CountLineBlank | float64 | 0 to 8.16k |
| CountLineCode | float64 | 1 to 25.7k |
| CountLineCodeDecl | float64 | 1 to 8.15k |
| CountLineCodeExe | float64 | 0 to 24.2k |
| CountLineComment | float64 | 0 to 16.5k |
| CountStmt | float64 | 1 to 9.71k |
| CountStmtDecl | float64 | 1 to 8.15k |
| CountStmtExe | float64 | 0 to 9.69k |
| MaxCyclomatic | float64 | 0 to 759 |
| MaxInheritanceTree | float64 | 0 to 16 |
| MaxNesting | float64 | 0 to 34 |
| SumCyclomatic | float64 | 0 to 2.9k |

---

- id: 2,200
- repository_name: huggingface/pytorch-pretrained-BERT
- file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
- class_name: transformers.models.distilbert.modeling_distilbert.DistilBertPreTrainedModel

human_written_code:

```python
from ...modeling_utils import PreTrainedModel
from .configuration_distilbert import DistilBertConfig
from ...utils import auto_docstring, logging
from torch import nn
@auto_docstring
class DistilBertPreTrainedModel(PreTrainedModel):
config: DistilBertConfig
base_model_prefix = 'distilbert'
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
def _init_weights(self, module: nn.Module):
"""Initialize the weights."""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, Embeddings) and self.config.sinusoidal_pos_embds:
create_sinusoidal_embeddings(self.config.max_position_embeddings, self.config.dim, module.position_embeddings.weight)
```

class_skeleton:

```python
@auto_docstring
class DistilBertPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
'''Initialize the weights.'''
pass
```

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=19, AvgCountLineBlank=0, AvgCountLineCode=16, AvgCountLineComment=3, AvgCyclomatic=7, CommentToCodeRatio=0.3, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=1, CountClassDerived=6, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=32, CountLineBlank=2, CountLineCode=23, CountLineCodeDecl=8, CountLineCodeExe=21, CountLineComment=7, CountStmt=18, CountStmtDecl=8, CountStmtExe=16, MaxCyclomatic=7, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=7

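The `_init_weights` hook in the row above is never called directly; `PreTrainedModel` walks the module tree and applies it to every submodule. A minimal sketch of the same pattern in plain PyTorch (the toy model and the 0.02 std are illustrative, not taken from this row):

```python
import torch
from torch import nn

def init_weights(module: nn.Module, initializer_range: float = 0.02):
    # Mirrors the Linear/Embedding/LayerNorm branches of _init_weights above.
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=initializer_range)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

model = nn.Sequential(nn.Embedding(100, 16, padding_idx=0), nn.Linear(16, 16), nn.LayerNorm(16))
model.apply(init_weights)  # nn.Module.apply recurses over every submodule
```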
---

- id: 2,201
- repository_name: huggingface/pytorch-pretrained-BERT
- file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
- class_name: transformers.models.distilbert.modeling_distilbert.DistilBertSdpaAttention

human_written_code:

```python
from ...configuration_utils import PretrainedConfig
import torch
from typing import Optional, Union
class DistilBertSdpaAttention(MultiHeadSelfAttention):
def __init__(self, config: PretrainedConfig):
super().__init__(config=config)
self.dropout_prob = config.attention_dropout
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, ...]:
"""
Parameters:
query: torch.tensor(bs, seq_length, dim)
key: torch.tensor(bs, seq_length, dim)
value: torch.tensor(bs, seq_length, dim)
mask: torch.tensor(bs, seq_length)
Returns:
weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
"""
if output_attentions or head_mask is not None:
logger.warning_once('DistilBertSdpaAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.')
return super().forward(query, key, value, mask, head_mask, output_attentions)
batch_size, _, _ = query.size()
dim_per_head = self.dim // self.n_heads
def shape(x: torch.Tensor) -> torch.Tensor:
"""separate heads"""
return x.view(batch_size, -1, self.n_heads, dim_per_head).transpose(1, 2)
def unshape(x: torch.Tensor) -> torch.Tensor:
"""group heads"""
return x.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * dim_per_head)
q = shape(self.q_lin(query))
k = shape(self.k_lin(key))
v = shape(self.v_lin(value))
attn_output = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=self.dropout_prob if self.training else 0.0, is_causal=False)
attn_output = unshape(attn_output)
attn_output = self.out_lin(attn_output)
return (attn_output,)
```

class_skeleton:

```python
class DistilBertSdpaAttention(MultiHeadSelfAttention):
def __init__(self, config: PretrainedConfig):
pass
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, ...]:
'''
Parameters:
query: torch.tensor(bs, seq_length, dim)
key: torch.tensor(bs, seq_length, dim)
value: torch.tensor(bs, seq_length, dim)
mask: torch.tensor(bs, seq_length)
Returns:
weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
'''
pass
def shape(x: torch.Tensor) -> torch.Tensor:
'''separate heads'''
pass
def unshape(x: torch.Tensor) -> torch.Tensor:
'''group heads'''
pass
```

metrics: total_program_units=5, total_doc_str=3, AvgCountLine=21, AvgCountLineBlank=2, AvgCountLineCode=14, AvgCountLineComment=5, AvgCyclomatic=2, CommentToCodeRatio=0.34, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=15, CountLine=78, CountLineBlank=10, CountLineCode=53, CountLineCodeDecl=21, CountLineCodeExe=40, CountLineComment=18, CountStmt=26, CountStmtDecl=13, CountStmtExe=21, MaxCyclomatic=4, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=7

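The SDPA path above delegates softmax(QK^T / sqrt(d))·V to one fused kernel and falls back to the eager implementation when attention weights or a head mask are requested. A minimal sketch (shapes and values invented) showing that `torch.nn.functional.scaled_dot_product_attention` matches the manual computation:

```python
import math
import torch
import torch.nn.functional as F

bs, n_heads, seq_length, dim_per_head = 2, 4, 8, 16
q = torch.randn(bs, n_heads, seq_length, dim_per_head)
k = torch.randn(bs, n_heads, seq_length, dim_per_head)
v = torch.randn(bs, n_heads, seq_length, dim_per_head)

# Fused kernel, as called in DistilBertSdpaAttention.forward.
fused = F.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False)

# Manual reference: softmax(QK^T / sqrt(d)) V.
scores = q @ k.transpose(-2, -1) / math.sqrt(dim_per_head)
manual = F.softmax(scores, dim=-1) @ v

print(torch.allclose(fused, manual, atol=1e-5))  # True
```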
---

- id: 2,202
- repository_name: huggingface/pytorch-pretrained-BERT
- file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
- class_name: transformers.models.distilbert.modeling_distilbert.Embeddings

human_written_code:

```python
from torch import nn
from ...configuration_utils import PretrainedConfig
from typing import Optional, Union
import torch
class Embeddings(nn.Module):
def __init__(self, config: PretrainedConfig):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
self.dropout = nn.Dropout(config.dropout)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor]=None) -> torch.Tensor:
"""
Parameters:
input_ids (torch.Tensor):
torch.tensor(bs, max_seq_length) The token ids to embed.
input_embeds (*optional*, torch.Tensor):
The pre-computed word embeddings. Can only be passed if the input ids are `None`.
Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type
embeddings)
"""
if input_ids is not None:
input_embeds = self.word_embeddings(input_ids)
seq_length = input_embeds.size(1)
if hasattr(self, 'position_ids'):
position_ids = self.position_ids[:, :seq_length]
else:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
position_embeddings = self.position_embeddings(position_ids)
embeddings = input_embeds + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
```

class_skeleton:

```python
class Embeddings(nn.Module):
def __init__(self, config: PretrainedConfig):
pass
def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor]=None) -> torch.Tensor:
'''
Parameters:
input_ids (torch.Tensor):
torch.tensor(bs, max_seq_length) The token ids to embed.
input_embeds (*optional*, torch.Tensor):
The pre-computed word embeddings. Can only be passed if the input ids are `None`.
Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type
embeddings)
'''
pass
```

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=21, AvgCountLineBlank=4, AvgCountLineCode=12, AvgCountLineComment=10, AvgCyclomatic=2, CommentToCodeRatio=0.79, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=44, CountLineBlank=8, CountLineCode=24, CountLineCodeDecl=11, CountLineCodeExe=21, CountLineComment=19, CountStmt=21, CountStmtDecl=11, CountStmtExe=18, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4

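The forward pass above adds a learned position embedding to each token embedding, then applies LayerNorm and dropout; DistilBERT has no token-type embeddings. A toy sketch of the position-id slicing (vocabulary and sizes are invented):

```python
import torch
from torch import nn

vocab_size, max_pos, dim = 50, 32, 8
word_emb = nn.Embedding(vocab_size, dim, padding_idx=0)
pos_emb = nn.Embedding(max_pos, dim)
position_ids = torch.arange(max_pos).expand((1, -1))  # registered as a buffer in Embeddings

input_ids = torch.tensor([[5, 7, 9, 2]])              # (bs=1, seq_length=4)
seq_length = input_ids.size(1)
embeddings = word_emb(input_ids) + pos_emb(position_ids[:, :seq_length])
print(embeddings.shape)  # torch.Size([1, 4, 8])
```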
---

- id: 2,203
- repository_name: huggingface/pytorch-pretrained-BERT
- file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
- class_name: transformers.models.distilbert.modeling_distilbert.FFN

human_written_code:

```python
import torch
from ...configuration_utils import PretrainedConfig
from ...activations import get_activation
from torch import nn
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
class FFN(nn.Module):
def __init__(self, config: PretrainedConfig):
super().__init__()
self.dropout = nn.Dropout(p=config.dropout)
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
self.activation = get_activation(config.activation)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
def ff_chunk(self, input: torch.Tensor) -> torch.Tensor:
x = self.lin1(input)
x = self.activation(x)
x = self.lin2(x)
x = self.dropout(x)
return x
```

class_skeleton:

```python
class FFN(nn.Module):
def __init__(self, config: PretrainedConfig):
pass
def forward(self, input: torch.Tensor) -> torch.Tensor:
pass
def ff_chunk(self, input: torch.Tensor) -> torch.Tensor:
pass
```

metrics: total_program_units=4, total_doc_str=0, AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=6, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=19, CountLineBlank=2, CountLineCode=17, CountLineCodeDecl=11, CountLineCodeExe=13, CountLineComment=0, CountStmt=17, CountStmtDecl=11, CountStmtExe=13, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=3

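`apply_chunking_to_forward` in the row above trades memory for compute: with `chunk_size_feed_forward > 0` it slices the input along the sequence dimension, runs `ff_chunk` on each slice, and concatenates the results. This is exact because the FFN acts on each position independently. A self-contained sketch of that idea, with the helper written out by hand rather than imported:

```python
import torch
from torch import nn

def chunked_forward(fn, chunk_size: int, seq_len_dim: int, x: torch.Tensor) -> torch.Tensor:
    # For a position-wise fn, running per chunk and concatenating is exact.
    if chunk_size == 0:
        return fn(x)
    chunks = x.split(chunk_size, dim=seq_len_dim)
    return torch.cat([fn(c) for c in chunks], dim=seq_len_dim)

ffn = nn.Sequential(nn.Linear(8, 32), nn.GELU(), nn.Linear(32, 8))  # stand-in for FFN
x = torch.randn(2, 10, 8)  # (bs, seq_length, dim)
print(torch.allclose(chunked_forward(ffn, 5, 1, x), ffn(x), atol=1e-6))  # True
```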
---

- id: 2,204
- repository_name: huggingface/pytorch-pretrained-BERT
- file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
- class_name: transformers.models.distilbert.modeling_distilbert.MultiHeadSelfAttention

human_written_code:

```python
from ...configuration_utils import PretrainedConfig
import torch
from typing import Optional, Union
import math
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from torch import nn
class MultiHeadSelfAttention(nn.Module):
def __init__(self, config: PretrainedConfig):
super().__init__()
self.config = config
self.n_heads = config.n_heads
self.dim = config.dim
self.dropout = nn.Dropout(p=config.attention_dropout)
self.is_causal = False
if self.dim % self.n_heads != 0:
raise ValueError(f'self.n_heads: {self.n_heads} must divide self.dim: {self.dim} evenly')
self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
self.pruned_heads: set[int] = set()
self.attention_head_size = self.dim // self.n_heads
def prune_heads(self, heads: list[int]):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.attention_head_size, self.pruned_heads)
self.q_lin = prune_linear_layer(self.q_lin, index)
self.k_lin = prune_linear_layer(self.k_lin, index)
self.v_lin = prune_linear_layer(self.v_lin, index)
self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
self.n_heads = self.n_heads - len(heads)
self.dim = self.attention_head_size * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, ...]:
"""
Parameters:
query: torch.tensor(bs, seq_length, dim)
key: torch.tensor(bs, seq_length, dim)
value: torch.tensor(bs, seq_length, dim)
mask: torch.tensor(bs, seq_length)
Returns:
weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
"""
bs, q_length, dim = query.size()
k_length = key.size(1)
dim_per_head = self.dim // self.n_heads
mask_reshp = (bs, 1, 1, k_length)
def shape(x: torch.Tensor) -> torch.Tensor:
"""separate heads"""
return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
def unshape(x: torch.Tensor) -> torch.Tensor:
"""group heads"""
return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
q = shape(self.q_lin(query))
k = shape(self.k_lin(key))
v = shape(self.v_lin(value))
q = q / math.sqrt(dim_per_head)
scores = torch.matmul(q, k.transpose(2, 3))
mask = (mask == 0).view(mask_reshp).expand_as(scores)
scores = scores.masked_fill(mask, torch.tensor(torch.finfo(scores.dtype).min))
weights = nn.functional.softmax(scores, dim=-1)
weights = self.dropout(weights)
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v)
context = unshape(context)
context = self.out_lin(context)
if output_attentions:
return (context, weights)
else:
return (context,)
```

class_skeleton:

```python
class MultiHeadSelfAttention(nn.Module):
def __init__(self, config: PretrainedConfig):
pass
def prune_heads(self, heads: list[int]):
pass
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, ...]:
'''
Parameters:
query: torch.tensor(bs, seq_length, dim)
key: torch.tensor(bs, seq_length, dim)
value: torch.tensor(bs, seq_length, dim)
mask: torch.tensor(bs, seq_length)
Returns:
weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
'''
pass
def shape(x: torch.Tensor) -> torch.Tensor:
'''separate heads'''
pass
def unshape(x: torch.Tensor) -> torch.Tensor:
'''group heads'''
pass
```

metrics: total_program_units=6, total_doc_str=3, AvgCountLine=21, AvgCountLineBlank=3, AvgCountLineCode=14, AvgCountLineComment=7, AvgCyclomatic=2, CommentToCodeRatio=0.47, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=0, CountClassDerived=2, CountDeclInstanceMethod=3, CountDeclInstanceVariable=11, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=102, CountLineBlank=17, CountLineCode=66, CountLineCodeDecl=36, CountLineCodeExe=52, CountLineComment=31, CountStmt=53, CountStmtDecl=28, CountStmtExe=47, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=9

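The key step in the eager path above is the masking trick: positions where `mask == 0` are filled with the dtype's minimum before the softmax, so they receive (near-)zero attention weight. A small sketch with invented numbers:

```python
import torch
from torch import nn

scores = torch.tensor([[2.0, 1.0, 0.5, -1.0]])  # toy attention scores over k_length=4
mask = torch.tensor([[1, 1, 0, 1]])             # 0 marks a padding position

# Same fill value as MultiHeadSelfAttention.forward uses.
masked = scores.masked_fill(mask == 0, torch.finfo(scores.dtype).min)
weights = nn.functional.softmax(masked, dim=-1)
print(weights)  # the third position gets ~0 probability
```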
---

- id: 2,205
- repository_name: huggingface/pytorch-pretrained-BERT
- file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
- class_name: transformers.models.distilbert.modeling_distilbert.Transformer

human_written_code:

```python
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
from torch import nn
import torch
from ...configuration_utils import PretrainedConfig
class Transformer(nn.Module):
def __init__(self, config: PretrainedConfig):
super().__init__()
self.n_layers = config.n_layers
self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])
self.gradient_checkpointing = False
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: Optional[bool]=None) -> Union[BaseModelOutput, tuple[torch.Tensor, ...]]:
"""
Parameters:
x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.
Returns:
hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hidden states in the last (top)
layer all_hidden_states: tuple[torch.tensor(bs, seq_length, dim)]
Tuple of length n_layers with the hidden states from each layer.
Optional: only if output_hidden_states=True
all_attentions: tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
Tuple of length n_layers with the attention weights from each layer
Optional: only if output_attentions=True
"""
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_state = x
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions)
hidden_state = layer_outputs[-1]
if output_attentions:
if len(layer_outputs) != 2:
raise ValueError(f'The length of the layer_outputs should be 2, but it is {len(layer_outputs)}')
attentions = layer_outputs[0]
all_attentions = all_attentions + (attentions,)
elif len(layer_outputs) != 1:
raise ValueError(f'The length of the layer_outputs should be 1, but it is {len(layer_outputs)}')
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_state,)
if not return_dict:
return tuple((v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions)
```

class_skeleton:

```python
class Transformer(nn.Module):
def __init__(self, config: PretrainedConfig):
pass
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: Optional[bool]=None) -> Union[BaseModelOutput, tuple[torch.Tensor, ...]]:
'''
Parameters:
x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.
Returns:
hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hidden states in the last (top)
layer all_hidden_states: tuple[torch.tensor(bs, seq_length, dim)]
Tuple of length n_layers with the hidden states from each layer.
Optional: only if output_hidden_states=True
all_attentions: tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
Tuple of length n_layers with the attention weights from each layer
Optional: only if output_attentions=True
'''
pass
```

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=37, AvgCountLineBlank=4, AvgCountLineCode=26, AvgCountLineComment=8, AvgCyclomatic=6, CommentToCodeRatio=0.29, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=75, CountLineBlank=9, CountLineCode=52, CountLineCodeDecl=20, CountLineCodeExe=41, CountLineComment=15, CountStmt=29, CountStmtDecl=12, CountStmtExe=26, MaxCyclomatic=11, MaxInheritanceTree=1, MaxNesting=3, SumCyclomatic=12

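The encoder above is a plain loop: each `TransformerBlock` consumes the previous hidden state, and optional tuples accumulate the per-layer states (the state is recorded before each layer, plus once after the last). A stripped-down sketch of the same control flow, with doubling functions standing in for the layers:

```python
import torch

def encoder_forward(layers, x, output_hidden_states=True):
    all_hidden_states = () if output_hidden_states else None
    hidden_state = x
    for layer in layers:
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_state,)
        hidden_state = layer(hidden_state)
    if output_hidden_states:
        all_hidden_states = all_hidden_states + (hidden_state,)
    return hidden_state, all_hidden_states

layers = [lambda h: h * 2 for _ in range(3)]  # stand-ins for TransformerBlock
last, states = encoder_forward(layers, torch.ones(1, 2, 4))
print(len(states))  # 4: the embedding output plus one state per layer
```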
---

- id: 2,206
- repository_name: huggingface/pytorch-pretrained-BERT
- file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/modeling_distilbert.py
- class_name: transformers.models.distilbert.modeling_distilbert.TransformerBlock

human_written_code:

```python
from torch import nn
from ...modeling_layers import GradientCheckpointingLayer
import torch
from typing import Optional, Union
from ...configuration_utils import PretrainedConfig
class TransformerBlock(GradientCheckpointingLayer):
def __init__(self, config: PretrainedConfig):
super().__init__()
if config.dim % config.n_heads != 0:
raise ValueError(f'config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly')
self.attention = DISTILBERT_ATTENTION_CLASSES[config._attn_implementation](config)
self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
self.ffn = FFN(config)
self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, ...]:
"""
Parameters:
x: torch.tensor(bs, seq_length, dim)
attn_mask: torch.tensor(bs, seq_length)
Returns:
sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output:
torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization.
"""
sa_output = self.attention(query=x, key=x, value=x, mask=attn_mask, head_mask=head_mask, output_attentions=output_attentions)
if output_attentions:
sa_output, sa_weights = sa_output
else:
if type(sa_output) is not tuple:
raise TypeError(f'sa_output must be a tuple but it is {type(sa_output)} type')
sa_output = sa_output[0]
sa_output = self.sa_layer_norm(sa_output + x)
ffn_output = self.ffn(sa_output)
ffn_output: torch.Tensor = self.output_layer_norm(ffn_output + sa_output)
output = (ffn_output,)
if output_attentions:
output = (sa_weights,) + output
return output
```

class_skeleton:

```python
class TransformerBlock(GradientCheckpointingLayer):
def __init__(self, config: PretrainedConfig):
pass
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False) -> tuple[torch.Tensor, ...]:
'''
Parameters:
x: torch.tensor(bs, seq_length, dim)
attn_mask: torch.tensor(bs, seq_length)
Returns:
sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output:
torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization.
'''
pass
```

metrics: total_program_units=3, total_doc_str=1, AvgCountLine=27, AvgCountLineBlank=4, AvgCountLineCode=18, AvgCountLineComment=8, AvgCyclomatic=3, CommentToCodeRatio=0.43, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=56, CountLineBlank=8, CountLineCode=37, CountLineCodeDecl=17, CountLineCodeExe=28, CountLineComment=16, CountStmt=23, CountStmtDecl=11, CountStmtExe=20, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=6

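Each block above is post-LayerNorm: the sublayer output is added to its input and then normalized (`sa_layer_norm(sa_output + x)`, then `output_layer_norm(ffn_output + sa_output)`). A compact sketch of that residual pattern, with linear layers standing in for the real sublayers:

```python
import torch
from torch import nn

dim = 8
norm1, norm2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
attn = nn.Linear(dim, dim)  # stand-in for self-attention
ffn = nn.Linear(dim, dim)   # stand-in for FFN

x = torch.randn(2, 5, dim)
sa_output = norm1(attn(x) + x)           # residual around attention, then LayerNorm
out = norm2(ffn(sa_output) + sa_output)  # residual around FFN, then LayerNorm
print(out.shape)  # torch.Size([2, 5, 8])
```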
---

- id: 2,207
- repository_name: huggingface/pytorch-pretrained-BERT
- file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/tokenization_distilbert.py
- class_name: transformers.models.distilbert.tokenization_distilbert.DistilBertTokenizer

human_written_code:

```python
from typing import Optional
import os
from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
import collections
class DistilBertTokenizer(PreTrainedTokenizer):
"""
Construct a DistilBERT tokenizer. Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
Whether or not to do basic tokenization before WordPiece.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
extra spaces.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs):
if not os.path.isfile(vocab_file):
raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = DistilBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
@property
def do_lower_case(self):
return self.basic_tokenizer.do_lower_case
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text, split_special_tokens=False):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens if not split_special_tokens else None):
if token in self.basic_tokenizer.never_split:
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is not None:
return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
return [1] + [0] * len(token_ids_0) + [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
else:
vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(vocab_file, 'w', encoding='utf-8') as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
index = token_index
writer.write(token + '\n')
index += 1
return (vocab_file,)
```

class_skeleton:

```python
class DistilBertTokenizer(PreTrainedTokenizer):
'''
Construct a DistilBERT tokenizer. Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
do_basic_tokenize (`bool`, *optional*, defaults to `True`):
Whether or not to do basic tokenization before WordPiece.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
extra spaces.
'''
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, clean_up_tokenization_spaces=True, **kwargs):
pass
@property
def do_lower_case(self):
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def _tokenize(self, text, split_special_tokens=False):
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) in an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) in a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) in a single string.'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
```

metrics: total_program_units=14, total_doc_str=6, AvgCountLine=15, AvgCountLineBlank=1, AvgCountLineCode=10, AvgCountLineComment=4, AvgCyclomatic=2, CommentToCodeRatio=0.8, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=12, CountDeclInstanceVariable=5, CountDeclMethod=12, CountDeclMethodAll=101, CountLine=247, CountLineBlank=28, CountLineCode=122, CountLineCodeDecl=54, CountLineCodeExe=86, CountLineComment=98, CountStmt=66, CountStmtDecl=30, CountStmtExe=53, MaxCyclomatic=6, MaxInheritanceTree=3, MaxNesting=3, SumCyclomatic=27

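The `build_inputs_with_special_tokens` / `get_special_tokens_mask` pair above is pure list arithmetic. A self-contained sketch with invented token ids (101 and 102 are the conventional [CLS] and [SEP] ids in BERT vocabularies, used here only for illustration):

```python
cls_token_id, sep_token_id = 101, 102  # conventional BERT ids, illustrative

def build_inputs(ids_0, ids_1=None):
    # [CLS] X [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair.
    if ids_1 is None:
        return [cls_token_id] + ids_0 + [sep_token_id]
    return [cls_token_id] + ids_0 + [sep_token_id] + ids_1 + [sep_token_id]

def special_tokens_mask(ids_0, ids_1=None):
    # 1 for a special token, 0 for a sequence token.
    if ids_1 is not None:
        return [1] + [0] * len(ids_0) + [1] + [0] * len(ids_1) + [1]
    return [1] + [0] * len(ids_0) + [1]

print(build_inputs([7, 8], [9]))         # [101, 7, 8, 102, 9, 102]
print(special_tokens_mask([7, 8], [9]))  # [1, 0, 0, 1, 0, 1]
```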
---

- id: 2,208
- repository_name: huggingface/pytorch-pretrained-BERT
- file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/distilbert/tokenization_distilbert_fast.py
- class_name: transformers.models.distilbert.tokenization_distilbert_fast.DistilBertTokenizerFast

human_written_code:

```python
from tokenizers import normalizers
from .tokenization_distilbert import DistilBertTokenizer
from typing import Optional
from ...tokenization_utils_fast import PreTrainedTokenizerFast
import json
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
clean_text (`bool`, *optional*, defaults to `True`):
Whether or not to clean the text before tokenization by removing any control characters and replacing all
whitespaces by the classic one.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
The prefix for subwords.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = DistilBertTokenizer
def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if normalizer_state.get('lowercase', do_lower_case) != do_lower_case or normalizer_state.get('strip_accents', strip_accents) != strip_accents or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars:
normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
normalizer_state['lowercase'] = do_lower_case
normalizer_state['strip_accents'] = strip_accents
normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1 is not None:
output += token_ids_1 + [self.sep_token_id]
return output
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
```

class_skeleton:

```python
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
clean_text (`bool`, *optional*, defaults to `True`):
Whether or not to clean the text before tokenization by removing any control characters and replacing all
whitespaces by the classic one.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
The prefix for subwords.
'''
def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
pass
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
```

metrics: total_program_units=4, total_doc_str=2, AvgCountLine=24, AvgCountLineBlank=3, AvgCountLineCode=14, AvgCountLineComment=7, AvgCyclomatic=2, CommentToCodeRatio=1.15, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=1, CountDeclMethod=4, CountDeclMethodAll=92, CountLine=145, CountLineBlank=18, CountLineCode=59, CountLineCodeDecl=30, CountLineCodeExe=39, CountLineComment=68, CountStmt=28, CountStmtDecl=15, CountStmtExe=23, MaxCyclomatic=2, MaxInheritanceTree=3, MaxNesting=1, SumCyclomatic=7

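The `__init__` above guards against a serialized `tokenizer.json` whose normalizer disagrees with the constructor arguments: it deserializes the normalizer state, patches the three options, and rebuilds the normalizer from the patched state. The same consistency check in plain Python, with a dict standing in for the serialized normalizer state (field names mirror the ones used above):

```python
# Hypothetical saved state, standing in for json.loads(...) of the backend normalizer.
saved_state = {"type": "BertNormalizer", "lowercase": True,
               "strip_accents": None, "handle_chinese_chars": True}
requested = {"lowercase": False, "strip_accents": None, "handle_chinese_chars": True}

# Same pattern as normalizer_state.get('lowercase', do_lower_case) != do_lower_case, etc.
if any(saved_state.get(key, value) != value for key, value in requested.items()):
    saved_state.update(requested)  # patch, then rebuild the normalizer from this state
print(saved_state["lowercase"])  # False
```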
---

- id: 2,209
- repository_name: huggingface/pytorch-pretrained-BERT
- file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/configuration_donut_swin.py
- class_name: transformers.models.donut.configuration_donut_swin.DonutSwinConfig

human_written_code:

```python
from ...configuration_utils import PretrainedConfig
class DonutSwinConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`DonutSwinModel`]. It is used to instantiate a
Donut model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Donut
[naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 7):
Size of windows.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
Example:
```python
>>> from transformers import DonutSwinConfig, DonutSwinModel
>>> # Initializing a Donut naver-clova-ix/donut-base style configuration
>>> configuration = DonutSwinConfig()
>>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration
>>> model = DonutSwinModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'donut-swin'
attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-05, **kwargs):
super().__init__(**kwargs)
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
```

class_skeleton:

```python
class DonutSwinConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`DonutSwinModel`]. It is used to instantiate a
Donut model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Donut
[naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 96):
Dimensionality of patch embedding.
depths (`list(int)`, *optional*, defaults to `[2, 2, 6, 2]`):
Depth of each layer in the Transformer encoder.
num_heads (`list(int)`, *optional*, defaults to `[3, 6, 12, 24]`):
Number of attention heads in each layer of the Transformer encoder.
window_size (`int`, *optional*, defaults to 7):
Size of windows.
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
use_absolute_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to add absolute position embeddings to the patch embeddings.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
Example:
```python
>>> from transformers import DonutSwinConfig, DonutSwinModel
>>> # Initializing a Donut naver-clova-ix/donut-base style configuration
>>> configuration = DonutSwinConfig()
>>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration
>>> model = DonutSwinModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-05, **kwargs):
pass
```

metrics: total_program_units=2, total_doc_str=1, AvgCountLine=42, AvgCountLineBlank=1, AvgCountLineCode=39, AvgCountLineComment=2, AvgCyclomatic=1, CommentToCodeRatio=1.18, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=18, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=109, CountLineBlank=11, CountLineCode=45, CountLineCodeDecl=41, CountLineCodeExe=24, CountLineComment=53, CountStmt=23, CountStmtDecl=22, CountStmtExe=21, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=1

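The only derived attribute in the config above is `hidden_size`: in a Swin-style encoder the channel width doubles after each of the `len(depths) - 1` downsampling stages, so the defaults give `96 * 2**3 = 768`. Checking the arithmetic:

```python
embed_dim, depths = 96, [2, 2, 6, 2]  # defaults from DonutSwinConfig
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))  # same formula as __init__
print(hidden_size)  # 768
```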
---

- id: 2,210
- repository_name: huggingface/pytorch-pretrained-BERT
- file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/feature_extraction_donut.py
- class_name: transformers.models.donut.feature_extraction_donut.DonutFeatureExtractor

human_written_code:

```python
from ...utils.import_utils import requires
from .image_processing_donut import DonutImageProcessor
import warnings
@requires(backends=('vision',))
class DonutFeatureExtractor(DonutImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use DonutImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
```

class_skeleton:

```python
@requires(backends=('vision',))
class DonutFeatureExtractor(DonutImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
```

metrics: total_program_units=3, total_doc_str=0, AvgCountLine=7, AvgCountLineBlank=0, AvgCountLineCode=7, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=28, CountLine=8, CountLineBlank=0, CountLineCode=8, CountLineCodeDecl=2, CountLineCodeExe=6, CountLineComment=0, CountStmt=4, CountStmtDecl=2, CountStmtExe=2, MaxCyclomatic=1, MaxInheritanceTree=4, MaxNesting=0, SumCyclomatic=1

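The class above is a pure deprecation shim: it warns and defers everything to `DonutImageProcessor`. The same pattern in isolation (both class names here are hypothetical):

```python
import warnings

class NewProcessor:
    def __init__(self, *args, **kwargs):
        self.args = args

class OldFeatureExtractor(NewProcessor):
    # Deprecated alias: warn once at construction, then behave exactly like the parent.
    def __init__(self, *args, **kwargs):
        warnings.warn("OldFeatureExtractor is deprecated; use NewProcessor instead.",
                      FutureWarning)
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits a FutureWarning, then acts as a NewProcessor
```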
---

- id: 2,211
- repository_name: huggingface/pytorch-pretrained-BERT
- file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/image_processing_donut.py
- class_name: transformers.models.donut.image_processing_donut.DonutImageProcessor

human_written_code:

```python
from typing import Optional, Union
from ...image_transforms import convert_to_rgb, get_resize_output_image_size, pad, resize, to_channel_dimension_format
from ...utils import TensorType, filter_out_non_signature_kwargs, logging
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...utils.import_utils import is_vision_available, requires
@requires(backends=('vision',))
class DonutImageProcessor(BaseImageProcessor):
"""
Constructs a Donut image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_thumbnail (`bool`, *optional*, defaults to `True`):
Whether to resize the image using thumbnail method.
do_align_long_axis (`bool`, *optional*, defaults to `False`):
Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. If `random_padding` is set to `True` in `preprocess`, each image is padded with a
random amount of padding on each size, up to the largest image size in the batch. Otherwise, all images are
padded to the largest image size in the batch.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Image standard deviation.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_thumbnail: bool=True, do_align_long_axis: bool=False, do_pad: bool=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 2560, 'width': 1920}
if isinstance(size, (tuple, list)):
size = size[::-1]
size = get_size_dict(size)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_thumbnail = do_thumbnail
self.do_align_long_axis = do_align_long_axis
self.do_pad = do_pad
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def align_long_axis(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Align the long axis of the image to the longest axis of the specified size.
Args:
image (`np.ndarray`):
The image to be aligned.
size (`dict[str, int]`):
The size `{"height": h, "width": w}` to align the long axis to.
data_format (`str` or `ChannelDimension`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
Returns:
`np.ndarray`: The aligned image.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = (size['height'], size['width'])
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
if input_data_format == ChannelDimension.LAST:
rot_axes = (0, 1)
elif input_data_format == ChannelDimension.FIRST:
rot_axes = (1, 2)
else:
raise ValueError(f'Unsupported data format: {input_data_format}')
if output_width < output_height and input_width > input_height or (output_width > output_height and input_width < input_height):
image = np.rot90(image, 3, axes=rot_axes)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def pad_image(self, image: np.ndarray, size: dict[str, int], random_padding: bool=False, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Pad the image to the specified size.
Args:
image (`np.ndarray`):
The image to be padded.
size (`dict[str, int]`):
The size `{"height": h, "width": w}` to pad the image to.
random_padding (`bool`, *optional*, defaults to `False`):
Whether to use random padding or not.
data_format (`str` or `ChannelDimension`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
output_height, output_width = (size['height'], size['width'])
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
delta_width = output_width - input_width
delta_height = output_height - input_height
if random_padding:
pad_top = np.random.randint(low=0, high=delta_height + 1)
pad_left = np.random.randint(low=0, high=delta_width + 1)
else:
pad_top = delta_height // 2
pad_left = delta_width // 2
pad_bottom = delta_height - pad_top
pad_right = delta_width - pad_left
padding = ((pad_top, pad_bottom), (pad_left, pad_right))
return pad(image, padding, data_format=data_format, input_data_format=input_data_format)
def thumbnail(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any
corresponding dimension of the specified size.
Args:
image (`np.ndarray`):
The image to be resized.
size (`dict[str, int]`):
The size `{"height": h, "width": w}` to resize the image to.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
The resampling filter to use.
data_format (`Optional[Union[str, ChannelDimension]]`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = (size['height'], size['width'])
height = min(input_height, output_height)
width = min(input_width, output_width)
if height == input_height and width == input_width:
return image
if input_height > input_width:
width = int(input_width * height / input_height)
elif input_width > input_height:
height = int(input_height * width / input_width)
return resize(image, size=(height, width), resample=resample, reducing_gap=2.0, data_format=data_format, input_data_format=input_data_format, **kwargs)
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resizes `image` to `(height, width)` specified by `size` using the PIL library.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size)
shortest_edge = min(size['height'], size['width'])
output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format)
resized_image = resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
return resized_image
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_thumbnail: Optional[bool]=None, do_align_long_axis: Optional[bool]=None, do_pad: Optional[bool]=None, random_padding: bool=False, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to min(size["height"],
size["width"]) with the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):
Whether to resize the image using thumbnail method.
do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):
Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image. If `random_padding` is set to `True`, each image is padded with a random
amount of padding on each side, up to the largest image size in the batch. Otherwise, all images are
padded to the largest image size in the batch.
random_padding (`bool`, *optional*, defaults to `False`):
Whether to use random padding when padding the image. If `True`, each image in the batch will be padded
with a random amount of padding on each side up to the size of the largest image in the batch.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image pixel values.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: defaults to the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
if isinstance(size, (tuple, list)):
size = size[::-1]
size = get_size_dict(size)
resample = resample if resample is not None else self.resample
do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail
do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis
do_pad = do_pad if do_pad is not None else self.do_pad
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
images = [convert_to_rgb(image) for image in images]
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if do_align_long_axis:
images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images]
if do_resize:
images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images]
if do_thumbnail:
images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images]
if do_pad:
images = [self.pad_image(image=image, size=size, random_padding=random_padding, input_data_format=input_data_format) for image in images]
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data, tensor_type=return_tensors)
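A minimal usage sketch for the processor above, assuming the `transformers` and `Pillow` packages are available; the blank PIL image is only a stand-in for a real document scan:
from PIL import Image
from transformers import DonutImageProcessor

processor = DonutImageProcessor()  # defaults: size={"height": 2560, "width": 1920}
image = Image.new("RGB", (1200, 1600))  # stand-in input, width x height
batch = processor.preprocess(image, return_tensors="np")
# Resized (shortest edge -> 1920), thumbnailed, and padded to the target size:
print(batch["pixel_values"][0].shape)  # expected (3, 2560, 1920)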
|
@requires(backends=('vision',))
class DonutImageProcessor(BaseImageProcessor):
'''
Constructs a Donut image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_thumbnail (`bool`, *optional*, defaults to `True`):
Whether to resize the image using thumbnail method.
do_align_long_axis (`bool`, *optional*, defaults to `False`):
Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. If `random_padding` is set to `True` in `preprocess`, each image is padded with a
random amount of padding on each side, up to the largest image size in the batch. Otherwise, all images are
padded to the largest image size in the batch.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Image standard deviation.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_thumbnail: bool=True, do_align_long_axis: bool=False, do_pad: bool=True, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
pass
def align_long_axis(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Align the long axis of the image to the longest axis of the specified size.
Args:
image (`np.ndarray`):
The image to be aligned.
size (`dict[str, int]`):
The size `{"height": h, "width": w}` to align the long axis to.
data_format (`str` or `ChannelDimension`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
Returns:
`np.ndarray`: The aligned image.
'''
pass
def pad_image(self, image: np.ndarray, size: dict[str, int], random_padding: bool=False, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Pad the image to the specified size.
Args:
image (`np.ndarray`):
The image to be padded.
size (`dict[str, int]`):
The size `{"height": h, "width": w}` to pad the image to.
random_padding (`bool`, *optional*, defaults to `False`):
Whether to use random padding or not.
data_format (`str` or `ChannelDimension`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def thumbnail(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any
corresponding dimension of the specified size.
Args:
image (`np.ndarray`):
The image to be resized.
size (`dict[str, int]`):
The size `{"height": h, "width": w}` to resize the image to.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
The resampling filter to use.
data_format (`Optional[Union[str, ChannelDimension]]`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resizes `image` to `(height, width)` specified by `size` using the PIL library.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_thumbnail: Optional[bool]=None, do_align_long_axis: Optional[bool]=None, do_pad: Optional[bool]=None, random_padding: bool=False, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to min(size["height"],
size["width"]) with the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):
Whether to resize the image using thumbnail method.
do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):
Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image. If `random_padding` is set to `True`, each image is padded with a random
amount of padding on each side, up to the largest image size in the batch. Otherwise, all images are
padded to the largest image size in the batch.
random_padding (`bool`, *optional*, defaults to `False`):
Whether to use random padding when padding the image. If `True`, each image in the batch will be padded
with a random amount of padding on each side up to the size of the largest image in the batch.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image pixel values.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: defaults to the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| 9
| 6
| 52
| 5
| 30
| 17
| 5
| 0.7
| 1
| 10
| 2
| 1
| 7
| 11
| 7
| 27
| 407
| 41
| 216
| 101
| 146
| 151
| 100
| 39
| 92
| 22
| 3
| 1
| 38
|
2,212
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinAttention
|
from torch import nn
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
import torch
from typing import Optional, Union
class DonutSwinAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
self.self = DonutSwinSelfAttention(config, dim, num_heads, window_size)
self.output = DonutSwinSelfOutput(config, dim)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
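A quick sketch of head pruning on the wrapper above, assuming default `DonutSwinConfig` values and a Swin-T style first stage (dim=96, 3 heads, window 7):
from transformers import DonutSwinConfig
from transformers.models.donut.modeling_donut_swin import DonutSwinAttention

attn = DonutSwinAttention(DonutSwinConfig(), dim=96, num_heads=3, window_size=7)
attn.prune_heads([0])  # drop the first head; q/k/v projections shrink accordingly
print(attn.self.num_attention_heads, attn.self.all_head_size)  # 2 64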
|
class DonutSwinAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 11
| 1
| 10
| 1
| 1
| 0.1
| 1
| 6
| 2
| 0
| 3
| 3
| 3
| 13
| 36
| 4
| 30
| 17
| 20
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
2,213
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinDropPath
|
import torch
from torch import nn
from typing import Optional, Union
class DonutSwinDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float]=None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f'p={self.drop_prob}'
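The module above delegates to a free function `drop_path` that is not included in this record. A minimal sketch of the usual stochastic-depth recipe (an assumption about that helper, not a verbatim copy):
import torch

def drop_path(x: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    # Identity at inference time or when the rate is zero
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = x.new_empty(shape).bernoulli_(keep_prob)
    return x * mask / keep_prob  # rescale so the expectation is unchanged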
|
class DonutSwinDropPath(nn.Module):
'''Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).'''
def __init__(self, drop_prob: Optional[float]=None) -> None:
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
def extra_repr(self) -> str:
pass
| 4
| 1
| 2
| 0
| 2
| 0
| 1
| 0.13
| 1
| 4
| 0
| 0
| 3
| 1
| 3
| 13
| 12
| 3
| 8
| 5
| 4
| 1
| 8
| 5
| 4
| 1
| 1
| 0
| 3
|
2,214
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinEmbeddings
|
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from typing import Optional, Union
import torch
class DonutSwinEmbeddings(nn.Module):
"""
Construct the patch and position embeddings. Optionally, also the mask token.
"""
def __init__(self, config, use_mask_token=False):
super().__init__()
self.patch_embeddings = DonutSwinPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.patch_grid = self.patch_embeddings.grid_size
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
if config.use_absolute_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
else:
self.position_embeddings = None
self.norm = nn.LayerNorm(config.embed_dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
images. It is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.Tensor]:
_, num_channels, height, width = pixel_values.shape
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
embeddings = self.norm(embeddings)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
if self.position_embeddings is not None:
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return (embeddings, output_dimensions)
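A standalone shape check of the bicubic interpolation step in `interpolate_pos_encoding`, assuming a 14x14 pretrained grid (196 patches plus one leading token):
import torch
from torch import nn

pos = torch.randn(1, 197, 64)  # (1, 1 + 14*14, dim)
cls_pos, patch_pos = pos[:, :1], pos[:, 1:]
patch_pos = patch_pos.reshape(1, 14, 14, 64).permute(0, 3, 1, 2)
patch_pos = nn.functional.interpolate(patch_pos, size=(18, 12), mode="bicubic", align_corners=False)
patch_pos = patch_pos.permute(0, 2, 3, 1).view(1, -1, 64)
print(torch.cat((cls_pos, patch_pos), dim=1).shape)  # torch.Size([1, 217, 64]) = 1 + 18*12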
|
class DonutSwinEmbeddings(nn.Module):
'''
Construct the patch and position embeddings. Optionally, also the mask token.
'''
def __init__(self, config, use_mask_token=False):
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
images. It is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.Tensor]:
pass
| 4
| 2
| 27
| 6
| 19
| 3
| 3
| 0.23
| 1
| 5
| 1
| 0
| 3
| 8
| 3
| 13
| 90
| 20
| 57
| 31
| 48
| 13
| 45
| 26
| 41
| 4
| 1
| 2
| 9
|
2,215
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinEncoder
|
import torch
from typing import Optional, Union
from torch import nn
class DonutSwinEncoder(nn.Module):
def __init__(self, config, grid_size):
super().__init__()
self.num_layers = len(config.depths)
self.config = config
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device='cpu')]
self.layers = nn.ModuleList([DonutSwinStage(config=config, dim=int(config.embed_dim * 2 ** i_layer), input_resolution=(grid_size[0] // 2 ** i_layer, grid_size[1] // 2 ** i_layer), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path=dpr[sum(config.depths[:i_layer]):sum(config.depths[:i_layer + 1])], downsample=DonutSwinPatchMerging if i_layer < self.num_layers - 1 else None) for i_layer in range(self.num_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, always_partition: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, DonutSwinEncoderOutput]:
all_hidden_states = () if output_hidden_states else None
all_reshaped_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
batch_size, _, hidden_size = hidden_states.shape
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
for i, layer_module in enumerate(self.layers):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = layer_outputs[1]
output_dimensions = layer_outputs[2]
input_dimensions = (output_dimensions[-2], output_dimensions[-1])
if output_hidden_states and output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states_before_downsampling.shape
reshaped_hidden_state = hidden_states_before_downsampling.view(batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states_before_downsampling,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
elif output_hidden_states and (not output_hidden_states_before_downsampling):
batch_size, _, hidden_size = hidden_states.shape
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
if output_attentions:
all_self_attentions += layer_outputs[3:]
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))
return DonutSwinEncoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states)
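Illustrative only: how the `dpr` list above distributes stochastic-depth rates across stages, assuming Swin-T style depths=(2, 2, 6, 2) and drop_path_rate=0.1:
import torch

depths, drop_path_rate = (2, 2, 6, 2), 0.1
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
for i in range(len(depths)):
    start, end = sum(depths[:i]), sum(depths[:i + 1])
    print(f"stage {i}: {[round(r, 3) for r in dpr[start:end]]}")  # rates grow linearly with depth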
|
class DonutSwinEncoder(nn.Module):
def __init__(self, config, grid_size):
pass
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, output_hidden_states_before_downsampling: Optional[bool]=False, always_partition: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple, DonutSwinEncoderOutput]:
pass
| 3
| 0
| 49
| 5
| 42
| 2
| 7
| 0.05
| 1
| 10
| 3
| 0
| 2
| 4
| 2
| 12
| 99
| 11
| 84
| 28
| 71
| 4
| 43
| 18
| 40
| 12
| 1
| 2
| 14
|
2,216
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinEncoderOutput
|
from typing import Optional, Union
from dataclasses import dataclass
import torch
from ...utils import ModelOutput, auto_docstring, logging, torch_int
@dataclass
@auto_docstring(custom_intro="\n DonutSwin encoder's outputs, with potential hidden states and attentions.\n ")
class DonutSwinEncoderOutput(ModelOutput):
"""
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro="\n DonutSwin encoder's outputs, with potential hidden states and attentions.\n ")
class DonutSwinEncoderOutput(ModelOutput):
'''
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 5
| 5
| 5
| 4
| 20
| 5
| 5
| 4
| 0
| 1
| 0
| 0
|
2,217
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinIntermediate
|
from torch import nn
import torch
from ...activations import ACT2FN
class DonutSwinIntermediate(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class DonutSwinIntermediate(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 4
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
2,218
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinLayer
|
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from torch import nn
import torch
class DonutSwinLayer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.shift_size = shift_size
self.window_size = config.window_size
self.input_resolution = input_resolution
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.attention = DonutSwinAttention(config, dim, num_heads, window_size=self.window_size)
self.drop_path = DonutSwinDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.intermediate = DonutSwinIntermediate(config, dim)
self.output = DonutSwinOutput(config, dim)
def set_shift_and_window_size(self, input_resolution):
if min(input_resolution) <= self.window_size:
self.shift_size = torch_int(0)
self.window_size = torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution)
def get_attn_mask(self, height, width, dtype, device):
if self.shift_size > 0:
img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device)
height_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None))
width_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None))
count = 0
for height_slice in height_slices:
for width_slice in width_slices:
img_mask[:, height_slice, width_slice, :] = count
count += 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)
else:
attn_mask = None
return attn_mask
def maybe_pad(self, hidden_states, height, width):
pad_right = (self.window_size - width % self.window_size) % self.window_size
pad_bottom = (self.window_size - height % self.window_size) % self.window_size
pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return (hidden_states, pad_values)
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]:
if not always_partition:
self.set_shift_and_window_size(input_dimensions)
else:
pass
height, width = input_dimensions
batch_size, _, channels = hidden_states.size()
shortcut = hidden_states
hidden_states = self.layernorm_before(hidden_states)
hidden_states = hidden_states.view(batch_size, height, width, channels)
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
if self.shift_size > 0:
shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_hidden_states = hidden_states
hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype, device=hidden_states_windows.device)
attention_outputs = self.attention(hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions)
attention_output = attention_outputs[0]
attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
if self.shift_size > 0:
attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
attention_windows = shifted_windows
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_windows = attention_windows[:, :height, :width, :].contiguous()
attention_windows = attention_windows.view(batch_size, height * width, channels)
hidden_states = shortcut + self.drop_path(attention_windows)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = hidden_states + self.output(layer_output)
layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
return layer_outputs
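The layer above relies on module-level helpers `window_partition` and `window_reverse` that are not part of this record. A minimal sketch of the standard Swin formulation (assumed to match the helpers used here):
import torch

def window_partition(x: torch.Tensor, window_size: int) -> torch.Tensor:
    # (B, H, W, C) -> (num_windows * B, window_size, window_size, C)
    b, h, w, c = x.shape
    x = x.view(b, h // window_size, window_size, w // window_size, window_size, c)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, c)

def window_reverse(windows: torch.Tensor, window_size: int, height: int, width: int) -> torch.Tensor:
    # Inverse of window_partition: back to (B, H, W, C)
    c = windows.shape[-1]
    x = windows.view(-1, height // window_size, width // window_size, window_size, window_size, c)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, c)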
|
class DonutSwinLayer(nn.Module):
def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0):
pass
def set_shift_and_window_size(self, input_resolution):
pass
def get_attn_mask(self, height, width, dtype, device):
pass
def maybe_pad(self, hidden_states, height, width):
pass
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 6
| 0
| 24
| 3
| 19
| 1
| 3
| 0.06
| 1
| 10
| 4
| 0
| 5
| 10
| 5
| 15
| 123
| 19
| 98
| 49
| 85
| 6
| 73
| 42
| 67
| 6
| 1
| 3
| 16
|
2,219
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinModel
|
from ...utils import ModelOutput, auto_docstring, logging, torch_int
import torch
from torch import nn
from typing import Optional, Union
@auto_docstring
class DonutSwinModel(DonutSwinPreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether to use a mask token for masked image modeling.
"""
super().__init__(config)
self.config = config
self.num_layers = len(config.depths)
self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
self.embeddings = DonutSwinEmbeddings(config, use_mask_token=use_mask_token)
self.encoder = DonutSwinEncoder(config, self.embeddings.patch_grid)
self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, DonutSwinModelOutput]:
"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
head_mask = self.get_head_mask(head_mask, len(self.config.depths))
embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding)
encoder_outputs = self.encoder(embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = encoder_outputs[0]
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output.transpose(1, 2))
pooled_output = torch.flatten(pooled_output, 1)
if not return_dict:
output = (sequence_output, pooled_output) + encoder_outputs[1:]
return output
return DonutSwinModelOutput(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states)
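A minimal forward-pass sketch for the model above, assuming default `DonutSwinConfig` values (image_size=224, patch_size=4, embed_dim=96, depths=[2, 2, 6, 2]):
import torch
from transformers import DonutSwinConfig, DonutSwinModel

model = DonutSwinModel(DonutSwinConfig()).eval()
with torch.no_grad():
    out = model(pixel_values=torch.randn(1, 3, 224, 224))
# 224/4 = 56 patches per side, halved by three merges -> 7x7 = 49 tokens,
# with 96 * 2**3 = 768 channels in the final stage:
print(out.last_hidden_state.shape)  # torch.Size([1, 49, 768])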
|
@auto_docstring
class DonutSwinModel(DonutSwinPreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether to use a mask token for masked image modeling.
'''
pass
def get_input_embeddings(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
class PreTrainedModel.
'''
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, DonutSwinModelOutput]:
'''
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
'''
pass
| 7
| 3
| 21
| 3
| 15
| 4
| 3
| 0.21
| 1
| 7
| 3
| 0
| 4
| 6
| 4
| 5
| 96
| 15
| 67
| 27
| 45
| 14
| 33
| 17
| 28
| 7
| 2
| 1
| 12
|
2,220
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinModelOutput
|
from typing import Optional, Union
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, logging, torch_int
import torch
@dataclass
@auto_docstring(custom_intro="\n DonutSwin model's outputs that also contains a pooling of the last hidden states.\n ")
class DonutSwinModelOutput(ModelOutput):
"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro="\n DonutSwin model's outputs that also contains a pooling of the last hidden states.\n ")
class DonutSwinModelOutput(ModelOutput):
'''
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.67
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 5
| 6
| 6
| 5
| 22
| 6
| 6
| 5
| 0
| 1
| 0
| 0
|
2,221
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinOutput
|
from torch import nn
import torch
class DonutSwinOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class DonutSwinOutput(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 10
| 1
| 9
| 5
| 6
| 0
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
2,222
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinPatchEmbeddings
|
from typing import Optional, Union
from torch import nn
import collections.abc
import torch
class DonutSwinPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = (config.image_size, config.patch_size)
num_channels, hidden_size = (config.num_channels, config.embed_dim)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def maybe_pad(self, pixel_values, height, width):
if width % self.patch_size[1] != 0:
pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
pixel_values = nn.functional.pad(pixel_values, pad_values)
if height % self.patch_size[0] != 0:
pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
pixel_values = nn.functional.pad(pixel_values, pad_values)
return pixel_values
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
_, num_channels, height, width = pixel_values.shape
pixel_values = self.maybe_pad(pixel_values, height, width)
embeddings = self.projection(pixel_values)
_, _, height, width = embeddings.shape
output_dimensions = (height, width)
embeddings = embeddings.flatten(2).transpose(1, 2)
return (embeddings, output_dimensions)
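A quick shape check for the patch embedding above, again assuming the default config (image_size=224, patch_size=4, embed_dim=96):
import torch
from transformers import DonutSwinConfig
from transformers.models.donut.modeling_donut_swin import DonutSwinPatchEmbeddings

embed = DonutSwinPatchEmbeddings(DonutSwinConfig())
tokens, (h, w) = embed(torch.randn(1, 3, 224, 224))
print(tokens.shape, (h, w))  # torch.Size([1, 3136, 96]) (56, 56)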
|
class DonutSwinPatchEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config):
pass
def maybe_pad(self, pixel_values, height, width):
pass
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
pass
| 4
| 1
| 11
| 1
| 10
| 0
| 2
| 0.2
| 1
| 4
| 0
| 0
| 3
| 6
| 3
| 13
| 41
| 5
| 30
| 17
| 26
| 6
| 30
| 17
| 26
| 3
| 1
| 1
| 7
|
2,223
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinPatchMerging
|
import torch
from torch import nn
class DonutSwinPatchMerging(nn.Module):
"""
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = height % 2 == 1 or width % 2 == 1
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
height, width = input_dimensions
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
input_feature = self.maybe_pad(input_feature, height, width)
input_feature_0 = input_feature[:, 0::2, 0::2, :]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels)
input_feature = self.norm(input_feature)
input_feature = self.reduction(input_feature)
return input_feature
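An illustrative shape check for the 2x2 merge above: an 8x8 grid of 32-channel tokens becomes a 4x4 grid of 64-channel tokens:
import torch
from transformers.models.donut.modeling_donut_swin import DonutSwinPatchMerging

merge = DonutSwinPatchMerging(input_resolution=(8, 8), dim=32)
out = merge(torch.randn(1, 64, 32), input_dimensions=(8, 8))  # 64 = 8 * 8 tokens
print(out.shape)  # torch.Size([1, 16, 64]): 4x4 tokens, channels doubled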
|
class DonutSwinPatchMerging(nn.Module):
'''
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
'''
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module=nn.LayerNorm) -> None:
pass
def maybe_pad(self, input_feature, height, width):
pass
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
pass
| 4
| 1
| 12
| 1
| 9
| 3
| 1
| 0.67
| 1
| 3
| 0
| 0
| 3
| 4
| 3
| 13
| 52
| 8
| 27
| 16
| 23
| 18
| 27
| 16
| 23
| 2
| 1
| 1
| 4
|
2,224
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinPreTrainedModel
|
from .configuration_donut_swin import DonutSwinConfig
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from torch import nn
@auto_docstring
class DonutSwinPreTrainedModel(PreTrainedModel):
config: DonutSwinConfig
base_model_prefix = 'donut'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = ['DonutSwinStage']
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, DonutSwinEmbeddings):
if module.mask_token is not None:
module.mask_token.data.zero_()
if module.position_embeddings is not None:
module.position_embeddings.data.zero_()
elif isinstance(module, DonutSwinSelfAttention):
module.relative_position_bias_table.data.zero_()
|
@auto_docstring
class DonutSwinPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 11
| 0
| 8
| 3
| 4
| 0.5
| 1
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 23
| 2
| 14
| 7
| 12
| 7
| 13
| 7
| 11
| 4
| 1
| 2
| 4
|
2,225
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinSelfAttention
|
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
from typing import Optional, Union
import collections.abc
import math
from torch import nn
import torch
class DonutSwinSelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
super().__init__()
if dim % num_heads != 0:
raise ValueError(f'The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})')
self.num_attention_heads = num_heads
self.attention_head_size = int(dim / num_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.window_size = window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads))
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(meshgrid([coords_h, coords_w], indexing='ij'))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.register_buffer('relative_position_index', relative_position_index)
self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
batch_size, dim, num_channels = hidden_states.shape
hidden_shape = (batch_size, dim, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
relative_position_bias = relative_position_bias.view(self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
if attention_mask is not None:
mask_shape = attention_mask.shape[0]
attention_scores = attention_scores.view(batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim)
attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
|
class DonutSwinSelfAttention(nn.Module):
def __init__(self, config, dim, num_heads, window_size):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 32
| 6
| 24
| 2
| 3
| 0.1
| 1
| 6
| 0
| 0
| 3
| 9
| 3
| 13
| 98
| 19
| 72
| 38
| 62
| 7
| 56
| 32
| 52
| 4
| 1
| 1
| 8
|
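To see how the `(2H-1)*(2W-1)` relative-position table is indexed, here is the same coordinate arithmetic as in `DonutSwinSelfAttention.__init__`, run standalone for a tiny 3x3 window. The window size is an arbitrary assumption, and `torch.meshgrid(..., indexing='ij')` stands in for the library's `meshgrid` helper:
```python
import torch

window_size = (3, 3)  # tiny window purely for illustration
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid(coords_h, coords_w, indexing="ij"))  # (2, 3, 3)
coords_flatten = torch.flatten(coords, 1)                                # (2, 9)

# pairwise (dh, dw) offsets between every pair of positions in the window
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # (2, 9, 9)
relative_coords = relative_coords.permute(1, 2, 0).contiguous()            # (9, 9, 2)

# shift offsets to be non-negative, then flatten (dh, dw) into one table index
relative_coords[:, :, 0] += window_size[0] - 1
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = relative_coords.sum(-1)

print(relative_position_index.shape)       # torch.Size([9, 9])
print(int(relative_position_index.max()))  # 24 == (2*3 - 1)**2 - 1
```
Every pair of window positions thus maps to one row of `relative_position_bias_table`, which is what lets `forward` fetch the whole bias with a single `view(-1)` lookup.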
2,226
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinSelfOutput
|
from torch import nn
import torch
class DonutSwinSelfOutput(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, dim)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class DonutSwinSelfOutput(nn.Module):
def __init__(self, config, dim):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 1
| 4
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 11
| 2
| 9
| 5
| 6
| 0
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
2,227
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/modeling_donut_swin.py
|
transformers.models.donut.modeling_donut_swin.DonutSwinStage
|
from ...modeling_layers import GradientCheckpointingLayer
from torch import nn
import torch
from typing import Optional, Union
class DonutSwinStage(GradientCheckpointingLayer):
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
super().__init__()
self.config = config
self.dim = dim
self.blocks = nn.ModuleList([DonutSwinLayer(config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, drop_path_rate=drop_path[i], shift_size=0 if i % 2 == 0 else config.window_size // 2) for i in range(depth)])
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
else:
self.downsample = None
self.pointing = False
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> tuple[torch.Tensor]:
height, width = input_dimensions
for i, layer_module in enumerate(self.blocks):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = hidden_states
if self.downsample is not None:
height_downsampled, width_downsampled = ((height + 1) // 2, (width + 1) // 2)
output_dimensions = (height, width, height_downsampled, width_downsampled)
hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
else:
output_dimensions = (height, width, height, width)
stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
if output_attentions:
stage_outputs += layer_outputs[1:]
return stage_outputs
|
class DonutSwinStage(GradientCheckpointingLayer):
def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
pass
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, always_partition: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 28
| 4
| 24
| 1
| 4
| 0.02
| 1
| 7
| 1
| 0
| 2
| 5
| 2
| 12
| 58
| 8
| 49
| 23
| 39
| 1
| 26
| 16
| 23
| 5
| 1
| 1
| 8
|
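The `(height + 1) // 2` rounding in the downsampling branch above is a ceil-halving: odd inputs are padded by `maybe_pad` before the 2x2 merge. A quick hedged sketch of how the spatial dimensions evolve across stages (the 37x50 starting resolution and four stages are invented numbers):
```python
# ceil-halving used between stages: odd sides are padded, then merged 2x2
height, width = 37, 50
for stage in range(4):
    downsampled = ((height + 1) // 2, (width + 1) // 2)
    print(f"stage {stage}: in {(height, width)} -> out {downsampled}")
    height, width = downsampled
```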
2,228
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/processing_donut.py
|
transformers.models.donut.processing_donut.DonutProcessor
|
from ...image_utils import ImageInput
from typing import Optional, Union
import warnings
from ...tokenization_utils_base import PreTokenizedInput, TextInput
import re
from contextlib import contextmanager
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
class DonutProcessor(ProcessorMixin):
"""
Constructs a Donut processor which wraps a Donut image processor and an XLMRoBERTa tokenizer into a single
processor.
[`DonutProcessor`] offers all the functionalities of [`DonutImageProcessor`] and
[`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. See the [`~DonutProcessor.__call__`] and
[`~DonutProcessor.decode`] for more information.
Args:
image_processor ([`DonutImageProcessor`], *optional*):
An instance of [`DonutImageProcessor`]. The image processor is a required input.
tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`], *optional*):
An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'AutoImageProcessor'
tokenizer_class = 'AutoTokenizer'
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if 'feature_extractor' in kwargs:
warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
feature_extractor = kwargs.pop('feature_extractor')
image_processor = image_processor if image_processor is not None else feature_extractor
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
self._in_target_context_manager = False
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[str, list[str], TextInput, PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[DonutProcessorKwargs]):
"""
When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
[`~AutoImageProcessor.__call__`] and returns its output. If used in the context
[`~DonutProcessor.as_target_processor`] this method forwards all its arguments to DonutTokenizer's
[`~DonutTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
"""
if self._in_target_context_manager:
return self.current_processor(images, text, **kwargs)
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.')
output_kwargs = self._merge_kwargs(DonutProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
if images is not None:
inputs = self.image_processor(images, **output_kwargs['images_kwargs'])
if text is not None:
if images is not None:
output_kwargs['text_kwargs'].setdefault('add_special_tokens', False)
encodings = self.tokenizer(text, **output_kwargs['text_kwargs'])
if text is None:
return inputs
elif images is None:
return encodings
else:
inputs['labels'] = encodings['input_ids']
inputs['input_ids'] = encodings['input_ids']
return inputs
@property
def model_input_names(self):
image_processor_input_names = self.image_processor.model_input_names
return list(image_processor_input_names + ['input_ids', 'labels'])
@contextmanager
def as_target_processor(self):
"""
Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Donut.
"""
warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your images inputs, or in a separate call.')
self._in_target_context_manager = True
self.current_processor = self.tokenizer
yield
self.current_processor = self.image_processor
self._in_target_context_manager = False
def token2json(self, tokens, is_inner_value=False, added_vocab=None):
"""
Convert a (generated) token sequence into an ordered JSON format.
"""
if added_vocab is None:
added_vocab = self.tokenizer.get_added_vocab()
output = {}
while tokens:
potential_start = re.search('<s_', tokens, re.IGNORECASE)
if potential_start is None:
break
start_token = tokens[potential_start.start():]
if '>' not in start_token:
break
start_token = start_token[:start_token.index('>') + 1]
key = start_token[len('<s_'):-len('>')]
key_escaped = re.escape(key)
end_token = re.search(f'</s_{key_escaped}>', tokens, re.IGNORECASE)
if end_token is None:
tokens = tokens.replace(start_token, '')
else:
end_token = end_token.group()
start_token_escaped = re.escape(start_token)
end_token_escaped = re.escape(end_token)
content = re.search(f'{start_token_escaped}(.*?){end_token_escaped}', tokens, re.IGNORECASE | re.DOTALL)
if content is not None:
content = content.group(1).strip()
if '<s_' in content and '</s_' in content:
value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
if value:
if len(value) == 1:
value = value[0]
output[key] = value
else:
output[key] = []
for leaf in content.split('<sep/>'):
leaf = leaf.strip()
if leaf in added_vocab and leaf[0] == '<' and (leaf[-2:] == '/>'):
leaf = leaf[1:-2]
output[key].append(leaf)
if len(output[key]) == 1:
output[key] = output[key][0]
tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
if tokens[:6] == '<sep/>':
return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
if output:
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {'text_sequence': tokens}
@property
def feature_extractor_class(self):
warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
return self.image_processor_class
@property
def feature_extractor(self):
warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)
return self.image_processor
|
class DonutProcessor(ProcessorMixin):
'''
Constructs a Donut processor which wraps a Donut image processor and an XLMRoBERTa tokenizer into a single
processor.
[`DonutProcessor`] offers all the functionalities of [`DonutImageProcessor`] and
[`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. See the [`~DonutProcessor.__call__`] and
[`~DonutProcessor.decode`] for more information.
Args:
image_processor ([`DonutImageProcessor`], *optional*):
An instance of [`DonutImageProcessor`]. The image processor is a required input.
tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`], *optional*):
An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input.
'''
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
pass
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[str, list[str], TextInput, PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[DonutProcessorKwargs]):
'''
When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
[`~AutoImageProcessor.__call__`] and returns its output. If used in the context
[`~DonutProcessor.as_target_processor`] this method forwards all its arguments to DonutTokenizer's
[`~DonutTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
'''
pass
@property
def model_input_names(self):
pass
@contextmanager
def as_target_processor(self):
'''
Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning Donut.
'''
pass
def token2json(self, tokens, is_inner_value=False, added_vocab=None):
'''
Convert a (generated) token sequence into an ordered JSON format.
'''
pass
@property
def feature_extractor_class(self):
pass
@property
def feature_extractor(self):
pass
| 12
| 4
| 20
| 2
| 16
| 3
| 4
| 0.29
| 1
| 5
| 1
| 0
| 8
| 2
| 8
| 25
| 192
| 23
| 135
| 39
| 116
| 39
| 95
| 29
| 86
| 16
| 2
| 6
| 35
|
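A small round-trip through the tag grammar that `token2json` parses: `<s_key>...</s_key>` pairs nest into dicts and `<sep/>` separates list items. The checkpoint name is one plausible Donut checkpoint, not a requirement, and the token string is invented; passing `added_vocab=[]` keeps the call self-contained:
```python
from transformers import DonutProcessor

# any Donut checkpoint works here; this one is an assumption
processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")

tokens = "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
print(processor.token2json(tokens, added_vocab=[]))
# {'menu': {'name': 'Latte', 'price': '4.50'}}
```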
2,229
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/donut/processing_donut.py
|
transformers.models.donut.processing_donut.DonutProcessorKwargs
|
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
class DonutProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {}
|
class DonutProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 2
| 2
| 1
| 0
| 2
| 2
| 1
| 0
| 3
| 0
| 0
|
2,230
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/configuration_dpr.py
|
transformers.models.dpr.configuration_dpr.DPRConfig
|
from ...configuration_utils import PretrainedConfig
class DPRConfig(PretrainedConfig):
"""
[`DPRConfig`] is the configuration class to store the configuration of a *DPRModel*.
This is the configuration class to store the configuration of a [`DPRContextEncoder`], [`DPRQuestionEncoder`], or a
[`DPRReader`]. It is used to instantiate the components of the DPR model according to the specified arguments,
defining the model component architectures. Instantiating a configuration with the defaults will yield a similar
configuration to that of the DPRContextEncoder
[facebook/dpr-ctx_encoder-single-nq-base](https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base)
architecture.
This class is a subclass of [`PretrainedConfig`]. Please check the superclass for the documentation of all kwargs.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the DPR model. Defines the different tokens that can be represented by the *inputs_ids*
passed to the forward method of [`BertModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the *token_type_ids* passed into [`BertModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
projection_dim (`int`, *optional*, defaults to 0):
Dimension of the projection for the context and question encoders. If it is set to zero (default), then no
projection is done.
Example:
```python
>>> from transformers import DPRConfig, DPRContextEncoder
>>> # Initializing a DPR facebook/dpr-ctx_encoder-single-nq-base style configuration
>>> configuration = DPRConfig()
>>> # Initializing a model (with random weights) from the facebook/dpr-ctx_encoder-single-nq-base style configuration
>>> model = DPRContextEncoder(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'dpr'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', projection_dim: int=0, **kwargs):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.projection_dim = projection_dim
self.position_embedding_type = position_embedding_type
|
class DPRConfig(PretrainedConfig):
'''
[`DPRConfig`] is the configuration class to store the configuration of a *DPRModel*.
This is the configuration class to store the configuration of a [`DPRContextEncoder`], [`DPRQuestionEncoder`], or a
[`DPRReader`]. It is used to instantiate the components of the DPR model according to the specified arguments,
defining the model component architectures. Instantiating a configuration with the defaults will yield a similar
configuration to that of the DPRContextEncoder
[facebook/dpr-ctx_encoder-single-nq-base](https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base)
architecture.
This class is a subclass of [`PretrainedConfig`]. Please check the superclass for the documentation of all kwargs.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the DPR model. Defines the different tokens that can be represented by the *inputs_ids*
passed to the forward method of [`BertModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the *token_type_ids* passed into [`BertModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
projection_dim (`int`, *optional*, defaults to 0):
Dimension of the projection for the context and question encoders. If it is set to zero (default), then no
projection is done.
Example:
```python
>>> from transformers import DPRConfig, DPRContextEncoder
>>> # Initializing a DPR facebook/dpr-ctx_encoder-single-nq-base style configuration
>>> configuration = DPRConfig()
>>> # Initializing a model (with random weights) from the facebook/dpr-ctx_encoder-single-nq-base style configuration
>>> model = DPRContextEncoder(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', projection_dim: int=0, **kwargs):
pass
| 2
| 1
| 35
| 1
| 34
| 0
| 1
| 1.61
| 1
| 2
| 0
| 0
| 1
| 14
| 1
| 1
| 105
| 11
| 36
| 35
| 16
| 58
| 18
| 17
| 16
| 1
| 1
| 0
| 1
|
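The effect of `projection_dim`, the one knob `DPRConfig` adds on top of the BERT-style fields: a nonzero value appends a linear head after the encoder, changing the embedding size. A hedged sketch with an arbitrary value of 128 (weights are randomly initialized, so this only demonstrates shapes):
```python
from transformers import DPRConfig, DPRContextEncoder

config = DPRConfig(projection_dim=128)  # adds a hidden_size -> 128 projection
model = DPRContextEncoder(config)       # randomly initialized weights

print(model.ctx_encoder.embeddings_size)  # 128 rather than the default 768
```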
2,231
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py
|
transformers.models.dpr.convert_dpr_original_checkpoint_to_pytorch.DPRContextEncoderState
|
from transformers import BertConfig, DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader
class DPRContextEncoderState(DPRState):
def load_dpr_model(self):
model = DPRContextEncoder(DPRConfig(**BertConfig.get_config_dict('google-bert/bert-base-uncased')[0]))
print(f'Loading DPR biencoder from {self.src_file}')
saved_state = load_states_from_checkpoint(self.src_file)
encoder, prefix = (model.ctx_encoder, 'ctx_model.')
state_dict = {'bert_model.embeddings.position_ids': model.ctx_encoder.bert_model.embeddings.position_ids}
for key, value in saved_state.model_dict.items():
if key.startswith(prefix):
key = key[len(prefix):]
if not key.startswith('encode_proj.'):
key = 'bert_model.' + key
state_dict[key] = value
encoder.load_state_dict(state_dict)
return model
|
class DPRContextEncoderState(DPRState):
def load_dpr_model(self):
pass
| 2
| 0
| 15
| 0
| 14
| 1
| 4
| 0.07
| 1
| 3
| 0
| 0
| 1
| 0
| 1
| 4
| 16
| 0
| 15
| 7
| 13
| 1
| 15
| 7
| 13
| 4
| 1
| 3
| 4
|
2,232
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py
|
transformers.models.dpr.convert_dpr_original_checkpoint_to_pytorch.DPRQuestionEncoderState
|
from transformers import BertConfig, DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader
class DPRQuestionEncoderState(DPRState):
def load_dpr_model(self):
model = DPRQuestionEncoder(DPRConfig(**BertConfig.get_config_dict('google-bert/bert-base-uncased')[0]))
print(f'Loading DPR biencoder from {self.src_file}')
saved_state = load_states_from_checkpoint(self.src_file)
encoder, prefix = (model.question_encoder, 'question_model.')
state_dict = {'bert_model.embeddings.position_ids': model.question_encoder.bert_model.embeddings.position_ids}
for key, value in saved_state.model_dict.items():
if key.startswith(prefix):
key = key[len(prefix):]
if not key.startswith('encode_proj.'):
key = 'bert_model.' + key
state_dict[key] = value
encoder.load_state_dict(state_dict)
return model
|
class DPRQuestionEncoderState(DPRState):
def load_dpr_model(self):
pass
| 2
| 0
| 15
| 0
| 14
| 1
| 4
| 0.07
| 1
| 3
| 0
| 0
| 1
| 0
| 1
| 4
| 16
| 0
| 15
| 7
| 13
| 1
| 15
| 7
| 13
| 4
| 1
| 3
| 4
|
2,233
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py
|
transformers.models.dpr.convert_dpr_original_checkpoint_to_pytorch.DPRReaderState
|
from transformers import BertConfig, DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader
class DPRReaderState(DPRState):
def load_dpr_model(self):
model = DPRReader(DPRConfig(**BertConfig.get_config_dict('google-bert/bert-base-uncased')[0]))
print(f'Loading DPR reader from {self.src_file}')
saved_state = load_states_from_checkpoint(self.src_file)
state_dict = {'encoder.bert_model.embeddings.position_ids': model.span_predictor.encoder.bert_model.embeddings.position_ids}
for key, value in saved_state.model_dict.items():
if key.startswith('encoder.') and (not key.startswith('encoder.encode_proj')):
key = 'encoder.bert_model.' + key[len('encoder.'):]
state_dict[key] = value
model.span_predictor.load_state_dict(state_dict)
return model
|
class DPRReaderState(DPRState):
def load_dpr_model(self):
pass
| 2
| 0
| 14
| 0
| 13
| 1
| 3
| 0.07
| 1
| 3
| 0
| 0
| 1
| 0
| 1
| 4
| 15
| 0
| 14
| 6
| 12
| 1
| 12
| 6
| 10
| 3
| 1
| 2
| 3
|
2,234
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py
|
transformers.models.dpr.convert_dpr_original_checkpoint_to_pytorch.DPRState
|
from pathlib import Path
class DPRState:
def __init__(self, src_file: Path):
self.src_file = src_file
def load_dpr_model(self):
raise NotImplementedError
@staticmethod
def from_type(comp_type: str, *args, **kwargs) -> 'DPRState':
if comp_type.startswith('c'):
return DPRContextEncoderState(*args, **kwargs)
if comp_type.startswith('q'):
return DPRQuestionEncoderState(*args, **kwargs)
if comp_type.startswith('r'):
return DPRReaderState(*args, **kwargs)
else:
raise ValueError("Component type must be either 'ctx_encoder', 'question_encoder' or 'reader'.")
|
class DPRState:
def __init__(self, src_file: Path):
pass
def load_dpr_model(self):
pass
@staticmethod
def from_type(comp_type: str, *args, **kwargs) -> 'DPRState':
pass
| 5
| 0
| 4
| 0
| 4
| 0
| 2
| 0
| 0
| 7
| 3
| 3
| 2
| 1
| 3
| 3
| 17
| 2
| 15
| 6
| 10
| 0
| 13
| 5
| 9
| 4
| 0
| 1
| 6
|
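The three converter states above differ only in which prefix they strip before loading; the shared key-remapping rule can be distilled as below. This is a hedged restatement for illustration, not part of the script, and the sample keys are invented:
```python
from typing import Optional

def remap_biencoder_key(key: str, prefix: str) -> Optional[str]:
    """Return a checkpoint key remapped for DPREncoder, or None to skip it."""
    if not key.startswith(prefix):
        return None  # each converter only keeps keys under its own prefix
    key = key[len(prefix):]
    if not key.startswith("encode_proj."):
        key = "bert_model." + key  # re-root plain BERT weights under bert_model
    return key

print(remap_biencoder_key("ctx_model.encoder.layer.0.output.dense.weight", "ctx_model."))
# bert_model.encoder.layer.0.output.dense.weight
print(remap_biencoder_key("question_model.encode_proj.weight", "ctx_model."))
# None
```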
2,235
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/modeling_dpr.py
|
transformers.models.dpr.modeling_dpr.DPRContextEncoder
|
from .configuration_dpr import DPRConfig
from typing import Optional, Union
from torch import Tensor, nn
import torch
from ...utils import ModelOutput, auto_docstring, logging
@auto_docstring(custom_intro='\n The bare DPRContextEncoder transformer outputting pooler outputs as context representations.\n ')
class DPRContextEncoder(DPRPretrainedContextEncoder):
def __init__(self, config: DPRConfig):
super().__init__(config)
self.config = config
self.ctx_encoder = DPREncoder(config)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, token_type_ids: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[DPRContextEncoderOutput, tuple[Tensor, ...]]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. To match pretraining, the DPR input sequence should be
formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs (for a pair title+text for example):
```
tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
```
(b) For single sequences (for a question for example):
```
tokens: [CLS] the dog is hairy . [SEP]
token_type_ids: 0 0 0 0 0 0 0
```
DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
rather than the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
Examples:
```python
>>> from transformers import DPRContextEncoder, DPRContextEncoderTokenizer
>>> tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
>>> model = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
>>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="pt")["input_ids"]
>>> embeddings = model(input_ids).pooler_output
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device) if input_ids is None else input_ids != self.config.pad_token_id
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
outputs = self.ctx_encoder(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
if not return_dict:
return outputs[1:]
return DPRContextEncoderOutput(pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n The bare DPRContextEncoder transformer outputting pooler outputs as context representations.\n ')
class DPRContextEncoder(DPRPretrainedContextEncoder):
def __init__(self, config: DPRConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, token_type_ids: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[DPRContextEncoderOutput, tuple[Tensor, ...]]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. To match pretraining, the DPR input sequence should be
formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs (for a pair title+text for example):
```
tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
```
(b) For single sequences (for a question for example):
```
tokens: [CLS] the dog is hairy . [SEP]
token_type_ids: 0 0 0 0 0 0 0
```
DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
rather than the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
Examples:
```python
>>> from transformers import DPRContextEncoder, DPRContextEncoderTokenizer
>>> tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
>>> model = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
>>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="pt")["input_ids"]
>>> embeddings = model(input_ids).pooler_output
```'''
pass
| 5
| 1
| 36
| 5
| 26
| 6
| 7
| 0.2
| 1
| 7
| 3
| 0
| 2
| 2
| 2
| 3
| 75
| 10
| 54
| 18
| 40
| 11
| 24
| 8
| 21
| 12
| 3
| 1
| 13
|
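The default-mask logic buried in the `forward` above, isolated: when no `attention_mask` is passed, real tokens are inferred by comparing `input_ids` against `pad_token_id`, and `token_type_ids` default to a single segment. The toy ids and pad id 0 (the `DPRConfig` default) are assumptions:
```python
import torch

pad_token_id = 0  # DPRConfig default
input_ids = torch.tensor([[101, 2054, 2003, 102, 0, 0]])  # right-padded toy ids

attention_mask = input_ids != pad_token_id    # the rule the encoder applies
token_type_ids = torch.zeros_like(input_ids)  # single-segment default

print(attention_mask)  # tensor([[ True,  True,  True,  True, False, False]])
```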
2,236
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/modeling_dpr.py
|
transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput
|
import torch
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, logging
from typing import Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`DPRContextEncoder`].\n ')
class DPRContextEncoderOutput(ModelOutput):
"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, embeddings_size)`):
The DPR encoder outputs the *pooler_output* that corresponds to the context representation. Last layer
hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
This output is to be used to embed contexts for nearest neighbors queries with questions embeddings.
"""
pooler_output: torch.FloatTensor
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`DPRContextEncoder`].\n ')
class DPRContextEncoderOutput(ModelOutput):
'''
pooler_output (`torch.FloatTensor` of shape `(batch_size, embeddings_size)`):
The DPR encoder outputs the *pooler_output* that corresponds to the context representation. Last layer
hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
This output is to be used to embed contexts for nearest neighbors queries with questions embeddings.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 4
| 4
| 3
| 3
| 17
| 4
| 3
| 3
| 0
| 1
| 0
| 0
|
2,237
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/modeling_dpr.py
|
transformers.models.dpr.modeling_dpr.DPREncoder
|
from torch import Tensor, nn
from typing import Optional, Union
from ..bert.modeling_bert import BertModel
from .configuration_dpr import DPRConfig
from ...modeling_outputs import BaseModelOutputWithPooling
class DPREncoder(DPRPreTrainedModel):
base_model_prefix = 'bert_model'
def __init__(self, config: DPRConfig):
super().__init__(config)
self.bert_model = BertModel(config, add_pooling_layer=False)
if self.bert_model.config.hidden_size <= 0:
raise ValueError("Encoder hidden_size can't be zero")
self.projection_dim = config.projection_dim
if self.projection_dim > 0:
self.encode_proj = nn.Linear(self.bert_model.config.hidden_size, config.projection_dim)
self.post_init()
def forward(self, input_ids: Tensor, attention_mask: Optional[Tensor]=None, token_type_ids: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=False) -> Union[BaseModelOutputWithPooling, tuple[Tensor, ...]]:
outputs = self.bert_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
pooled_output = sequence_output[:, 0, :]
if self.projection_dim > 0:
pooled_output = self.encode_proj(pooled_output)
if not return_dict:
return (sequence_output, pooled_output) + outputs[2:]
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@property
def embeddings_size(self) -> int:
if self.projection_dim > 0:
return self.encode_proj.out_features
return self.bert_model.config.hidden_size
|
class DPREncoder(DPRPreTrainedModel):
def __init__(self, config: DPRConfig):
pass
def forward(self, input_ids: Tensor, attention_mask: Optional[Tensor]=None, token_type_ids: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=False) -> Union[BaseModelOutputWithPooling, tuple[Tensor, ...]]:
pass
@property
def embeddings_size(self) -> int:
pass
| 5
| 0
| 16
| 1
| 15
| 0
| 3
| 0.02
| 1
| 8
| 3
| 0
| 3
| 3
| 3
| 4
| 54
| 6
| 47
| 21
| 33
| 1
| 24
| 11
| 20
| 3
| 2
| 1
| 8
|
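What `DPREncoder` returns as the pooled output is simply the first ([CLS]) token's hidden state, optionally passed through `encode_proj`; there is no tanh pooler as in BERT. A hedged shape sketch with random tensors (hidden size 768 and projection size 128 are assumptions):
```python
import torch
from torch import nn

sequence_output = torch.randn(2, 16, 768)  # (batch, seq_len, hidden)
pooled_output = sequence_output[:, 0, :]   # first ([CLS]) token, no tanh pooler

encode_proj = nn.Linear(768, 128)          # only present when projection_dim > 0
print(encode_proj(pooled_output).shape)    # torch.Size([2, 128])
```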
2,238
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/modeling_dpr.py
|
transformers.models.dpr.modeling_dpr.DPRPreTrainedModel
|
from ...modeling_utils import PreTrainedModel
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, logging
@auto_docstring
class DPRPreTrainedModel(PreTrainedModel):
_supports_sdpa = True
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class DPRPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 15
| 0
| 12
| 3
| 6
| 0.21
| 1
| 0
| 0
| 5
| 1
| 0
| 1
| 1
| 18
| 1
| 14
| 3
| 12
| 3
| 12
| 3
| 10
| 6
| 1
| 2
| 6
|
2,239
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/modeling_dpr.py
|
transformers.models.dpr.modeling_dpr.DPRPretrainedContextEncoder
|
from .configuration_dpr import DPRConfig
class DPRPretrainedContextEncoder(DPRPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: DPRConfig
base_model_prefix = 'ctx_encoder'
|
class DPRPretrainedContextEncoder(DPRPreTrainedModel):
'''
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 9
| 1
| 4
| 4
| 3
| 4
| 4
| 4
| 3
| 0
| 2
| 0
| 0
|
2,240
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/modeling_dpr.py
|
transformers.models.dpr.modeling_dpr.DPRPretrainedQuestionEncoder
|
from .configuration_dpr import DPRConfig
class DPRPretrainedQuestionEncoder(DPRPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: DPRConfig
base_model_prefix = 'question_encoder'
|
class DPRPretrainedQuestionEncoder(DPRPreTrainedModel):
'''
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 9
| 1
| 4
| 4
| 3
| 4
| 4
| 4
| 3
| 0
| 2
| 0
| 0
|
2,241
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/modeling_dpr.py
|
transformers.models.dpr.modeling_dpr.DPRPretrainedReader
|
from .configuration_dpr import DPRConfig
class DPRPretrainedReader(DPRPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: DPRConfig
base_model_prefix = 'span_predictor'
|
class DPRPretrainedReader(DPRPreTrainedModel):
'''
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 9
| 1
| 4
| 4
| 3
| 4
| 4
| 4
| 3
| 0
| 2
| 0
| 0
|
2,242
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/modeling_dpr.py
|
transformers.models.dpr.modeling_dpr.DPRQuestionEncoder
|
from .configuration_dpr import DPRConfig
from ...utils import ModelOutput, auto_docstring, logging
from torch import Tensor, nn
from typing import Optional, Union
import torch
@auto_docstring(custom_intro='\n The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.\n ')
class DPRQuestionEncoder(DPRPretrainedQuestionEncoder):
def __init__(self, config: DPRConfig):
super().__init__(config)
self.config = config
self.question_encoder = DPREncoder(config)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, token_type_ids: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[DPRQuestionEncoderOutput, tuple[Tensor, ...]]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. To match pretraining, the DPR input sequence should be
formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs (for a pair title+text for example):
```
tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
```
(b) For single sequences (for a question for example):
```
tokens: [CLS] the dog is hairy . [SEP]
token_type_ids: 0 0 0 0 0 0 0
```
DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
rather than the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
Examples:
```python
>>> from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer
>>> tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
>>> model = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
>>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="pt")["input_ids"]
>>> embeddings = model(input_ids).pooler_output
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device) if input_ids is None else input_ids != self.config.pad_token_id
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
outputs = self.question_encoder(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
if not return_dict:
return outputs[1:]
return DPRQuestionEncoderOutput(pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.\n ')
class DPRQuestionEncoder(DPRPretrainedQuestionEncoder):
def __init__(self, config: DPRConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, token_type_ids: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[DPRQuestionEncoderOutput, tuple[Tensor, ...]]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. To match pretraining, the DPR input sequence should be
formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs (for a pair title+text for example):
```
tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
```
(b) For single sequences (for a question for example):
```
tokens: [CLS] the dog is hairy . [SEP]
token_type_ids: 0 0 0 0 0 0 0
```
DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
rather than the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
Examples:
```python
>>> from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer
>>> tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
>>> model = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
>>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="pt")["input_ids"]
>>> embeddings = model(input_ids).pooler_output
```
'''
pass
| 5
| 1
| 36
| 4
| 26
| 6
| 7
| 0.22
| 1
| 7
| 3
| 0
| 2
| 2
| 2
| 3
| 76
| 9
| 55
| 18
| 41
| 12
| 25
| 8
| 22
| 12
| 3
| 1
| 13
|
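Question and context embeddings from the two encoders are meant to be compared with a plain dot product. A hedged end-to-end sketch (the checkpoint names follow the docstrings above; the passages are invented, and the argmax comment states the expected outcome rather than a guarantee):
```python
import torch
from transformers import (
    DPRContextEncoder,
    DPRContextEncoderTokenizer,
    DPRQuestionEncoder,
    DPRQuestionEncoderTokenizer,
)

q_tok = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
c_tok = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
c_enc = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")

question = q_enc(**q_tok("Who wrote Hamlet?", return_tensors="pt")).pooler_output
passages = c_enc(**c_tok(
    ["Hamlet is a tragedy written by William Shakespeare.", "The Eiffel Tower is in Paris."],
    padding=True, return_tensors="pt",
)).pooler_output

scores = question @ passages.T  # higher dot product = more relevant passage
print(scores.argmax(dim=-1))    # expected: tensor([0]), the Shakespeare passage
```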
2,243
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/modeling_dpr.py
|
transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput
|
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, logging
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`DPRQuestionEncoder`].\n ')
class DPRQuestionEncoderOutput(ModelOutput):
"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, embeddings_size)`):
The DPR encoder outputs the *pooler_output* that corresponds to the question representation. Last layer
hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
This output is to be used to embed questions for nearest neighbors queries with context embeddings.
"""
pooler_output: torch.FloatTensor
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`DPRQuestionEncoder`].\n ')
class DPRQuestionEncoderOutput(ModelOutput):
'''
pooler_output (`torch.FloatTensor` of shape `(batch_size, embeddings_size)`):
The DPR encoder outputs the *pooler_output* that corresponds to the question representation. Last layer
hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
This output is to be used to embed questions for nearest neighbors queries with context embeddings.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 4
| 4
| 3
| 3
| 17
| 4
| 3
| 3
| 0
| 1
| 0
| 0
|
2,244
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/modeling_dpr.py
|
transformers.models.dpr.modeling_dpr.DPRReader
|
import torch
from ...utils import ModelOutput, auto_docstring, logging
from .configuration_dpr import DPRConfig
from typing import Optional, Union
from torch import Tensor, nn
@auto_docstring(custom_intro='\n The bare DPRReader transformer outputting span predictions.\n ')
class DPRReader(DPRPretrainedReader):
def __init__(self, config: DPRConfig):
super().__init__(config)
self.config = config
self.span_predictor = DPRSpanPredictor(config)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, inputs_embeds: Optional[Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[DPRReaderOutput, tuple[Tensor, ...]]:
"""
input_ids (`tuple[torch.LongTensor]` of shapes `(n_passages, sequence_length)`):
Indices of input sequence tokens in the vocabulary. It has to be a sequence triplet with 1) the question
and 2) the passage titles and 3) the passage texts. To match pretraining, the DPR `input_ids` sequence should
be formatted with [CLS] and [SEP] with the format:
`[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>`
DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
rather than the left.
Indices can be obtained using [`DPRReaderTokenizer`]. See this class documentation for more details.
[What are input IDs?](../glossary#input-ids)
inputs_embeds (`torch.FloatTensor` of shape `(n_passages, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
Examples:
```python
>>> from transformers import DPRReader, DPRReaderTokenizer
>>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
>>> model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
>>> encoded_inputs = tokenizer(
... questions=["What is love ?"],
... titles=["Haddaway"],
... texts=["'What Is Love' is a song recorded by the artist Haddaway"],
... return_tensors="pt",
... )
>>> outputs = model(**encoded_inputs)
>>> start_logits = outputs.start_logits
>>> end_logits = outputs.end_logits
>>> relevance_logits = outputs.relevance_logits
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
return self.span_predictor(input_ids, attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
| null | 5
| 1
| 33
| 4
| 20
| 10
| 5
| 0.48
| 1
| 7
| 3
| 0
| 2
| 2
| 2
| 3
| 70
| 8
| 42
| 16
| 29
| 20
| 20
| 7
| 17
| 9
| 3
| 1
| 10
|
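A hedged sketch of decoding an answer span from the reader's three logit tensors, continuing the docstring example above. The greedy argmax decoding here is a deliberate simplification of the n-best span search a real pipeline would perform:
```python
import torch
from transformers import DPRReader, DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")

encoded = tokenizer(
    questions=["What is love ?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = model(**encoded)

best = outputs.relevance_logits.argmax()                 # most relevant passage
start = outputs.start_logits[best].argmax()              # greedy span start
end = outputs.end_logits[best][start:].argmax() + start  # end at or after start
span = encoded["input_ids"][best][start : end + 1]
print(tokenizer.decode(span))
```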
2,245
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/modeling_dpr.py
|
transformers.models.dpr.modeling_dpr.DPRReaderOutput
|
import torch
from typing import Optional, Union
from dataclasses import dataclass
from ...utils import ModelOutput, auto_docstring, logging
@dataclass
@auto_docstring(custom_intro='\n Class for outputs of [`DPRReader`].\n ')
class DPRReaderOutput(ModelOutput):
"""
start_logits (`torch.FloatTensor` of shape `(n_passages, sequence_length)`):
Logits of the start index of the span for each passage.
end_logits (`torch.FloatTensor` of shape `(n_passages, sequence_length)`):
Logits of the end index of the span for each passage.
relevance_logits (`torch.FloatTensor` of shape `(n_passages, )`):
Outputs of the QA classifier of the DPRReader that corresponds to the scores of each passage to answer the
question, compared to all the other passages.
"""
start_logits: torch.FloatTensor
end_logits: Optional[torch.FloatTensor] = None
relevance_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
| null | 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.33
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 4
| 6
| 5
| 5
| 20
| 6
| 5
| 5
| 0
| 1
| 0
| 0
|
2,246
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/modeling_dpr.py
|
transformers.models.dpr.modeling_dpr.DPRSpanPredictor
|
from .configuration_dpr import DPRConfig
from torch import Tensor, nn
from typing import Optional, Union
class DPRSpanPredictor(DPRPreTrainedModel):
base_model_prefix = 'encoder'
def __init__(self, config: DPRConfig):
super().__init__(config)
self.encoder = DPREncoder(config)
self.qa_outputs = nn.Linear(self.encoder.embeddings_size, 2)
self.qa_classifier = nn.Linear(self.encoder.embeddings_size, 1)
self.post_init()
def forward(self, input_ids: Tensor, attention_mask: Tensor, inputs_embeds: Optional[Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=False) -> Union[DPRReaderOutput, tuple[Tensor, ...]]:
n_passages, sequence_length = input_ids.size() if input_ids is not None else inputs_embeds.size()[:2]
outputs = self.encoder(input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
relevance_logits = self.qa_classifier(sequence_output[:, 0, :])
start_logits = start_logits.view(n_passages, sequence_length)
end_logits = end_logits.view(n_passages, sequence_length)
relevance_logits = relevance_logits.view(n_passages)
if not return_dict:
return (start_logits, end_logits, relevance_logits) + outputs[2:]
return DPRReaderOutput(start_logits=start_logits, end_logits=end_logits, relevance_logits=relevance_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
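# Shape walk-through (toy sizes, illustrative only): the qa_outputs head maps each
# token to two scalars, split into per-token start/end logits for every passage,
# while qa_classifier scores each passage from its [CLS] representation.
import torch
from torch import nn

n_passages, seq_len, hidden = 2, 8, 16
sequence_output = torch.randn(n_passages, seq_len, hidden)
start_logits, end_logits = nn.Linear(hidden, 2)(sequence_output).split(1, dim=-1)
relevance_logits = nn.Linear(hidden, 1)(sequence_output[:, 0, :])
print(start_logits.squeeze(-1).shape, relevance_logits.view(n_passages).shape)  # torch.Size([2, 8]) torch.Size([2])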
|
class DPRSpanPredictor(DPRPreTrainedModel):
def __init__(self, config: DPRConfig):
pass
def forward(self, input_ids: Tensor, attention_mask: Tensor, inputs_embeds: Optional[Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=False) -> Union[DPRReaderOutput, tuple[Tensor, ...]]:
pass
| 3
| 0
| 26
| 2
| 21
| 3
| 2
| 0.11
| 1
| 6
| 3
| 0
| 2
| 3
| 2
| 3
| 55
| 6
| 44
| 21
| 33
| 5
| 23
| 13
| 20
| 3
| 2
| 1
| 4
|
2,247
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/tokenization_dpr.py
|
transformers.models.dpr.tokenization_dpr.CustomDPRReaderTokenizerMixin
|
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from typing import Optional, Union
from ...tokenization_utils_base import BatchEncoding
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
def __call__(self, questions, titles: Optional[str]=None, texts: Optional[str]=None, padding: Union[bool, str]=False, truncation: Union[bool, str]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_attention_mask: Optional[bool]=None, **kwargs) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
elif titles is None or texts is None:
text_pair = titles if texts is None else texts
return super().__call__(questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
titles = titles if not isinstance(titles, str) else [titles]
texts = texts if not isinstance(texts, str) else [texts]
n_passages = len(titles)
questions = questions if not isinstance(questions, str) else [questions] * n_passages
if len(titles) != len(texts):
raise ValueError(f'There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts.')
encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
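        # Each reader input is `<question + title tokens> + <text tokens>`; the text is
        # appended without special tokens, and the pair is truncated to max_length when
        # truncation is requested.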
encoded_inputs = {'input_ids': [(encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)]}
if return_attention_mask is not False:
attention_mask = []
for input_ids in encoded_inputs['input_ids']:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
encoded_inputs['attention_mask'] = attention_mask
return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int=16, max_answer_length: int=64, num_spans_per_passage: int=4) -> list[DPRSpanPrediction]:
"""
Get the span predictions for the extractive Q&A model.
Returns: *List* of *DPRSpanPrediction* sorted by descending *(relevance_score, span_score)*. Each
*DPRSpanPrediction* is a *Tuple* with:
- **span_score**: `float` that corresponds to the score given by the reader for this span compared to other
spans in the same passage. It corresponds to the sum of the start and end logits of the span.
- **relevance_score**: `float` that corresponds to the score of each passage to answer the question,
compared to all the other passages. It corresponds to the output of the QA classifier of the DPRReader.
- **doc_id**: `int` the id of the passage.
- **start_index**: `int` the start index of the span (inclusive).
- **end_index**: `int` the end index of the span (inclusive).
Examples:
```python
>>> from transformers import DPRReader, DPRReaderTokenizer
>>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
>>> model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
>>> encoded_inputs = tokenizer(
... questions=["What is love ?"],
... titles=["Haddaway"],
... texts=["'What Is Love' is a song recorded by the artist Haddaway"],
... return_tensors="pt",
... )
>>> outputs = model(**encoded_inputs)
>>> predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
>>> print(predicted_spans[0].text) # best span
a song
```"""
input_ids = reader_input['input_ids']
start_logits, end_logits, relevance_logits = reader_output[:3]
n_passages = len(relevance_logits)
sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
nbest_spans_predictions: list[DPRSpanPrediction] = []
for doc_id in sorted_docs:
sequence_ids = list(input_ids[doc_id])
passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1
if sequence_ids[-1] == self.pad_token_id:
sequence_len = sequence_ids.index(self.pad_token_id)
else:
sequence_len = len(sequence_ids)
best_spans = self._get_best_spans(start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(DPRSpanPrediction(span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index:end_index + 1])))
if len(nbest_spans_predictions) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _get_best_spans(self, start_logits: list[int], end_logits: list[int], max_answer_length: int, top_spans: int) -> list[DPRSpanPrediction]:
"""
Finds the best answer spans for the extractive Q&A model for one passage. It returns the best spans in descending
`span_score` order, keeping at most `top_spans` spans. Spans longer than `max_answer_length` are ignored.
"""
scores = []
for start_index, start_score in enumerate(start_logits):
for answer_length, end_score in enumerate(end_logits[start_index:start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
scores = sorted(scores, key=lambda x: x[1], reverse=True)
chosen_span_intervals = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'Wrong span indices: [{start_index}:{end_index}]')
length = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'Span is too long: {length} > {max_answer_length}')
if any((start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for prev_start_index, prev_end_index in chosen_span_intervals)):
continue
chosen_span_intervals.append((start_index, end_index))
if len(chosen_span_intervals) == top_spans:
break
return chosen_span_intervals
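# Toy demonstration (illustrative logits) of the scoring in _get_best_spans: every
# (start, end) pair within max_answer_length is scored by start_logit + end_logit
# and sorted descending; overlapping spans are then filtered out greedily above.
start_logits, end_logits, max_answer_length = [0.9, 0.1, 0.4], [0.2, 0.8, 0.3], 2
scores = sorted(
    (((s, s + length), start + end)
     for s, start in enumerate(start_logits)
     for length, end in enumerate(end_logits[s:s + max_answer_length])),
    key=lambda x: x[1],
    reverse=True,
)
print(scores[0])  # ((0, 1), ~1.7) -> best span covers tokens 0..1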
| null | 5
| 2
| 56
| 2
| 43
| 11
| 8
| 0.25
| 0
| 10
| 1
| 1
| 3
| 0
| 3
| 3
| 170
| 9
| 130
| 52
| 102
| 32
| 60
| 28
| 56
| 11
| 0
| 2
| 24
|
2,248
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/tokenization_dpr.py
|
transformers.models.dpr.tokenization_dpr.DPRContextEncoderTokenizer
|
from ..bert.tokenization_bert import BertTokenizer
class DPRContextEncoderTokenizer(BertTokenizer):
"""
Construct a DPRContextEncoder tokenizer.
[`DPRContextEncoderTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
splitting and wordpiece.
Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
|
class DPRContextEncoderTokenizer(BertTokenizer):
'''
Construct a DPRContextEncoder tokenizer.
[`DPRContextEncoderTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
splitting and wordpiece.
Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 101
| 11
| 3
| 2
| 2
| 1
| 6
| 2
| 2
| 1
| 0
| 4
| 0
| 0
|
2,249
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/tokenization_dpr.py
|
transformers.models.dpr.tokenization_dpr.DPRQuestionEncoderTokenizer
|
from ..bert.tokenization_bert import BertTokenizer
class DPRQuestionEncoderTokenizer(BertTokenizer):
"""
Constructs a DPRQuestionEncoder tokenizer.
[`DPRQuestionEncoderTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
splitting and wordpiece.
Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
|
class DPRQuestionEncoderTokenizer(BertTokenizer):
'''
Constructs a DPRQuestionEncoder tokenizer.
[`DPRQuestionEncoderTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
splitting and wordpiece.
Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 101
| 11
| 3
| 2
| 2
| 1
| 6
| 2
| 2
| 1
| 0
| 4
| 0
| 0
|
2,250
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/tokenization_dpr.py
|
transformers.models.dpr.tokenization_dpr.DPRReaderTokenizer
|
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
"""
Construct a DPRReader tokenizer.
[`DPRReaderTokenizer`] is almost identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
splitting and wordpiece. The difference is that it has three input strings: question, titles and texts, which are
combined to be fed to the [`DPRReader`] model.
Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
|
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
'''
Construct a DPRReader tokenizer.
[`DPRReaderTokenizer`] is almost identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
splitting and wordpiece. The difference is that it has three input strings: question, titles and texts, which are
combined to be fed to the [`DPRReader`] model.
Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 2.33
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 104
| 13
| 3
| 3
| 3
| 2
| 7
| 3
| 3
| 2
| 0
| 4
| 0
| 0
|
2,251
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/tokenization_dpr_fast.py
|
transformers.models.dpr.tokenization_dpr_fast.CustomDPRReaderTokenizerMixin
|
from typing import Optional, Union
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ...tokenization_utils_base import BatchEncoding
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
def __call__(self, questions, titles: Optional[str]=None, texts: Optional[str]=None, padding: Union[bool, str]=False, truncation: Union[bool, str]=False, max_length: Optional[int]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_attention_mask: Optional[bool]=None, **kwargs) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
elif titles is None or texts is None:
text_pair = titles if texts is None else texts
return super().__call__(questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
titles = titles if not isinstance(titles, str) else [titles]
texts = texts if not isinstance(texts, str) else [texts]
n_passages = len(titles)
questions = questions if not isinstance(questions, str) else [questions] * n_passages
assert len(titles) == len(texts), f'There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts.'
encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
encoded_inputs = {'input_ids': [(encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)]}
if return_attention_mask is not False:
attention_mask = []
for input_ids in encoded_inputs['input_ids']:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
encoded_inputs['attention_mask'] = attention_mask
return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int=16, max_answer_length: int=64, num_spans_per_passage: int=4) -> list[DPRSpanPrediction]:
"""
Get the span predictions for the extractive Q&A model.
Returns: *List* of *DPRSpanPrediction* sorted by descending *(relevance_score, span_score)*. Each
*DPRSpanPrediction* is a *Tuple* with:
- **span_score**: `float` that corresponds to the score given by the reader for this span compared to other
spans in the same passage. It corresponds to the sum of the start and end logits of the span.
- **relevance_score**: `float` that corresponds to the score of each passage to answer the question,
compared to all the other passages. It corresponds to the output of the QA classifier of the DPRReader.
- **doc_id**: `int` the id of the passage.
- **start_index**: `int` the start index of the span (inclusive).
- **end_index**: `int` the end index of the span (inclusive).
Examples:
```python
>>> from transformers import DPRReader, DPRReaderTokenizer
>>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
>>> model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
>>> encoded_inputs = tokenizer(
... questions=["What is love ?"],
... titles=["Haddaway"],
... texts=["'What Is Love' is a song recorded by the artist Haddaway"],
... return_tensors="pt",
... )
>>> outputs = model(**encoded_inputs)
>>> predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
>>> print(predicted_spans[0].text) # best span
a song
```"""
input_ids = reader_input['input_ids']
start_logits, end_logits, relevance_logits = reader_output[:3]
n_passages = len(relevance_logits)
sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
nbest_spans_predictions: list[DPRSpanPrediction] = []
for doc_id in sorted_docs:
sequence_ids = list(input_ids[doc_id])
passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1
if sequence_ids[-1] == self.pad_token_id:
sequence_len = sequence_ids.index(self.pad_token_id)
else:
sequence_len = len(sequence_ids)
best_spans = self._get_best_spans(start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(DPRSpanPrediction(span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index:end_index + 1])))
if len(nbest_spans_predictions) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _get_best_spans(self, start_logits: list[int], end_logits: list[int], max_answer_length: int, top_spans: int) -> list[DPRSpanPrediction]:
"""
Finds the best answer spans for the extractive Q&A model for one passage. It returns the best spans in descending
`span_score` order, keeping at most `top_spans` spans. Spans longer than `max_answer_length` are ignored.
"""
scores = []
for start_index, start_score in enumerate(start_logits):
for answer_length, end_score in enumerate(end_logits[start_index:start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
scores = sorted(scores, key=lambda x: x[1], reverse=True)
chosen_span_intervals = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
length = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any((start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for prev_start_index, prev_end_index in chosen_span_intervals)):
continue
chosen_span_intervals.append((start_index, end_index))
if len(chosen_span_intervals) == top_spans:
break
return chosen_span_intervals
| null | 5
| 2
| 55
| 2
| 42
| 11
| 7
| 0.25
| 0
| 9
| 1
| 1
| 3
| 0
| 3
| 3
| 167
| 9
| 127
| 52
| 99
| 32
| 57
| 28
| 53
| 10
| 0
| 2
| 21
|
2,252
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/tokenization_dpr_fast.py
|
transformers.models.dpr.tokenization_dpr_fast.DPRContextEncoderTokenizerFast
|
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
"""
Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
[`DPRContextEncoderTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
punctuation splitting and wordpiece.
Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = DPRContextEncoderTokenizer
|
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
'''
Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
[`DPRContextEncoderTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
punctuation splitting and wordpiece.
Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters.
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 12
| 3
| 3
| 3
| 2
| 6
| 3
| 3
| 2
| 0
| 4
| 0
| 0
|
2,253
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/tokenization_dpr_fast.py
|
transformers.models.dpr.tokenization_dpr_fast.DPRQuestionEncoderTokenizerFast
|
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
"""
Constructs a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
[`DPRQuestionEncoderTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
punctuation splitting and wordpiece.
Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class = DPRQuestionEncoderTokenizer
|
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
'''
Constructs a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
[`DPRQuestionEncoderTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
punctuation splitting and wordpiece.
Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters.
'''
pass
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 12
| 3
| 3
| 3
| 2
| 6
| 3
| 3
| 2
| 0
| 4
| 0
| 0
|
2,254
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpr/tokenization_dpr_fast.py
|
transformers.models.dpr.tokenization_dpr_fast.DPRReaderTokenizerFast
|
from ..bert.tokenization_bert_fast import BertTokenizerFast
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
"""
Constructs a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).
[`DPRReaderTokenizerFast`] is almost identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
punctuation splitting and wordpiece. The difference is that it has three input strings: question, titles and texts,
which are combined to be fed to the [`DPRReader`] model.
Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = DPRReaderTokenizer
|
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
'''
Constructs a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).
[`DPRReaderTokenizerFast`] is almost identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
punctuation splitting and wordpiece. The difference is that it has three input strings: question, titles and texts,
which are combined to be fed to the [`DPRReader`] model.
Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 1.75
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 95
| 15
| 4
| 4
| 4
| 3
| 7
| 4
| 4
| 3
| 0
| 4
| 0
| 0
|
2,255
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/configuration_dpt.py
|
transformers.models.dpt.configuration_dpt.DPTConfig
|
from ...utils.backbone_utils import verify_backbone_config_arguments
from ..auto.configuration_auto import CONFIG_MAPPING
from ..bit import BitConfig
import copy
from ...configuration_utils import PretrainedConfig
class DPTConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`DPTModel`]. It is used to instantiate a DPT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the DPT
[Intel/dpt-large](https://huggingface.co/Intel/dpt-large) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 384):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
is_hybrid (`bool`, *optional*, defaults to `False`):
Whether to use a hybrid backbone. Useful in the context of loading DPT-Hybrid models.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
backbone_out_indices (`list[int]`, *optional*, defaults to `[2, 5, 8, 11]`):
Indices of the intermediate hidden states to use from backbone.
readout_type (`str`, *optional*, defaults to `"project"`):
The readout type to use when processing the readout token (CLS token) of the intermediate hidden states of
the ViT backbone. Can be one of [`"ignore"`, `"add"`, `"project"`].
- "ignore" simply ignores the CLS token.
- "add" passes the information from the CLS token to all other tokens by adding the representations.
- "project" passes information to the other tokens by concatenating the readout to all other tokens before
projecting the
representation to the original feature dimension D using a linear layer followed by a GELU non-linearity.
reassemble_factors (`list[int]`, *optional*, defaults to `[4, 2, 1, 0.5]`):
The up/downsampling factors of the reassemble layers.
neck_hidden_sizes (`list[str]`, *optional*, defaults to `[96, 192, 384, 768]`):
The hidden sizes to project to for the feature maps of the backbone.
fusion_hidden_size (`int`, *optional*, defaults to 256):
The number of channels before fusion.
head_in_index (`int`, *optional*, defaults to -1):
The index of the features to use in the heads.
use_batch_norm_in_fusion_residual (`bool`, *optional*, defaults to `False`):
Whether to use batch normalization in the pre-activate residual units of the fusion blocks.
use_bias_in_fusion_residual (`bool`, *optional*, defaults to `True`):
Whether to use bias in the pre-activate residual units of the fusion blocks.
add_projection (`bool`, *optional*, defaults to `False`):
Whether to add a projection layer before the depth estimation head.
use_auxiliary_head (`bool`, *optional*, defaults to `True`):
Whether to use an auxiliary head during training.
auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
Weight of the cross-entropy loss of the auxiliary head.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
semantic_classifier_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the semantic classification head.
backbone_featmap_shape (`list[int]`, *optional*, defaults to `[1, 1024, 24, 24]`):
Used only for the `hybrid` embedding type. The shape of the feature maps of the backbone.
neck_ignore_stages (`list[int]`, *optional*, defaults to `[0, 1]`):
Used only for the `hybrid` embedding type. The stages of the readout layers to ignore.
backbone_config (`Union[dict[str, Any], PretrainedConfig]`, *optional*):
The configuration of the backbone model. Only used in case `is_hybrid` is `True` or in case you want to
leverage the [`AutoBackbone`] API.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
pooler_output_size (`int`, *optional*):
Dimensionality of the pooler layer. If None, defaults to `hidden_size`.
pooler_act (`str`, *optional*, defaults to `"tanh"`):
The activation function to be used by the pooler.
Example:
```python
>>> from transformers import DPTModel, DPTConfig
>>> # Initializing a DPT dpt-large style configuration
>>> configuration = DPTConfig()
>>> # Initializing a model from the dpt-large style configuration
>>> model = DPTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'dpt'
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=16, num_channels=3, is_hybrid=False, qkv_bias=True, backbone_out_indices=[2, 5, 8, 11], readout_type='project', reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_bias_in_fusion_residual=None, add_projection=False, use_auxiliary_head=True, auxiliary_loss_weight=0.4, semantic_loss_ignore_index=255, semantic_classifier_dropout=0.1, backbone_featmap_shape=[1, 1024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, pooler_output_size=None, pooler_act='tanh', **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.is_hybrid = is_hybrid
use_autobackbone = False
if self.is_hybrid:
if backbone_config is None:
backbone_config = {'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True}
if isinstance(backbone_config, dict):
logger.info('Initializing the config with a `BiT` backbone.')
backbone_config = BitConfig(**backbone_config)
elif isinstance(backbone_config, PretrainedConfig):
backbone_config = backbone_config
else:
raise ValueError(f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.')
self.backbone_config = backbone_config
self.backbone_featmap_shape = backbone_featmap_shape
self.neck_ignore_stages = neck_ignore_stages
if readout_type != 'project':
raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
elif backbone is not None or backbone_config is not None:
use_autobackbone = True
if isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get('model_type')
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
self.backbone_config = backbone_config
self.backbone_featmap_shape = None
self.neck_ignore_stages = []
verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)
else:
self.backbone_config = None
self.backbone_featmap_shape = None
self.neck_ignore_stages = []
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.backbone_kwargs = backbone_kwargs
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.use_autobackbone = use_autobackbone
self.backbone_out_indices = None if use_autobackbone else backbone_out_indices
if readout_type not in ['ignore', 'add', 'project']:
raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.readout_type = readout_type
self.reassemble_factors = reassemble_factors
self.neck_hidden_sizes = neck_hidden_sizes
self.fusion_hidden_size = fusion_hidden_size
self.head_in_index = head_in_index
self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
self.use_bias_in_fusion_residual = use_bias_in_fusion_residual
self.add_projection = add_projection
self.use_auxiliary_head = use_auxiliary_head
self.auxiliary_loss_weight = auxiliary_loss_weight
self.semantic_loss_ignore_index = semantic_loss_ignore_index
self.semantic_classifier_dropout = semantic_classifier_dropout
self.pooler_output_size = pooler_output_size if pooler_output_size else hidden_size
self.pooler_act = pooler_act
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns:
`dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = copy.deepcopy(self.__dict__)
if output['backbone_config'] is not None:
output['backbone_config'] = self.backbone_config.to_dict()
output['model_type'] = self.__class__.model_type
return output
@property
def sub_configs(self):
return {'backbone_config': type(self.backbone_config)} if getattr(self, 'backbone_config', None) is not None else {}
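# Hedged sketch (assumes a transformers install exposing DPTConfig): the config
# supports a plain ViT backbone, a hybrid BiT backbone, or an AutoBackbone; only
# the AutoBackbone path sets use_autobackbone and clears backbone_out_indices.
from transformers import DPTConfig

plain = DPTConfig()
print(plain.use_autobackbone, plain.backbone_out_indices)  # False [2, 5, 8, 11]
hybrid = DPTConfig(is_hybrid=True)  # falls back to a default BiT backbone config
print(hybrid.backbone_config.model_type)  # bit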
| null | 5
| 2
| 72
| 7
| 62
| 4
| 6
| 0.84
| 1
| 4
| 1
| 0
| 2
| 35
| 2
| 2
| 255
| 25
| 125
| 80
| 85
| 105
| 70
| 43
| 67
| 10
| 1
| 2
| 12
|
2,256
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/feature_extraction_dpt.py
|
transformers.models.dpt.feature_extraction_dpt.DPTFeatureExtractor
|
from .image_processing_dpt import DPTImageProcessor
from ...utils.import_utils import requires
import warnings
@requires(backends=('vision',))
class DPTFeatureExtractor(DPTImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use DPTImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
|
@requires(backends=('vision',))
class DPTFeatureExtractor(DPTImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 32
| 8
| 0
| 8
| 2
| 6
| 0
| 4
| 2
| 2
| 1
| 4
| 0
| 1
|
2,257
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/image_processing_dpt.py
|
transformers.models.dpt.image_processing_dpt.DPTImageProcessor
|
from ...utils.import_utils import requires
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, is_torch_available, is_torch_tensor, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging, requires_backends
from ...image_transforms import pad, resize, to_channel_dimension_format
from typing import TYPE_CHECKING, Optional, Union
import math
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
@requires(backends=('vision',))
class DPTImageProcessor(BaseImageProcessor):
"""
Constructs a DPT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions. Can be overridden by `do_resize` in `preprocess`.
size (`dict[str, int]`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the image after resizing. Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Defines the resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. Can
be overridden by `keep_aspect_ratio` in `preprocess`.
ensure_multiple_of (`int`, *optional*, defaults to 1):
If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Can be overridden
by `ensure_multiple_of` in `preprocess`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
`preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in `preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `False`):
Whether to apply center padding. This was introduced in the DINOv2 paper, which uses the model in
combination with DPT.
size_divisor (`int`, *optional*):
If `do_pad` is `True`, pads the image dimensions to be divisible by this value. This was introduced in the
DINOv2 paper, which uses the model in combination with DPT.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
`preprocess` method.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, keep_aspect_ratio: bool=False, ensure_multiple_of: int=1, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: bool=False, size_divisor: Optional[int]=None, do_reduce_labels: bool=False, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 384, 'width': 384}
size = get_size_dict(size)
self.do_resize = do_resize
self.size = size
self.keep_aspect_ratio = keep_aspect_ratio
self.ensure_multiple_of = ensure_multiple_of
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_pad = do_pad
self.size_divisor = size_divisor
self.do_reduce_labels = do_reduce_labels
def resize(self, image: np.ndarray, size: dict[str, int], keep_aspect_ratio: bool=False, ensure_multiple_of: int=1, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image
is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is
set, the image is resized to a size that is a multiple of this value.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Target size of the output image.
keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.
ensure_multiple_of (`int`, *optional*, defaults to 1):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Defines the resampling filter to use if resizing the image. Otherwise, the image is resized to size
specified in `size`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size)
if 'height' not in size or 'width' not in size:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
output_size = get_resize_output_image_size(image, output_size=(size['height'], size['width']), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, input_data_format=input_data_format)
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def pad_image(self, image: np.ndarray, size_divisor: int, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""
Center pad an image to be a multiple of `multiple`.
Args:
image (`np.ndarray`):
Image to pad.
size_divisor (`int`):
The width and height of the image will be padded to a multiple of this number.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
def _get_pad(size, size_divisor):
new_size = math.ceil(size / size_divisor) * size_divisor
pad_size = new_size - size
pad_size_left = pad_size // 2
pad_size_right = pad_size - pad_size_left
return (pad_size_left, pad_size_right)
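        # Worked example: size=100, size_divisor=32 -> new_size=128, pad_size=28,
        # split as (14, 14) so the padding is centered on both sides.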
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
height, width = get_image_size(image, input_data_format)
pad_size_left, pad_size_right = _get_pad(height, size_divisor)
pad_size_top, pad_size_bottom = _get_pad(width, size_divisor)
return pad(image, ((pad_size_left, pad_size_right), (pad_size_top, pad_size_bottom)), data_format=data_format)
def reduce_label(self, label: ImageInput) -> np.ndarray:
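        # Shift label ids down by one so classes start at 0; the background class (0)
        # becomes 255, the conventional ignore index for segmentation losses.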
label = to_numpy_array(label)
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
return label
def _preprocess(self, image: ImageInput, do_reduce_labels: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, keep_aspect_ratio: Optional[bool]=None, ensure_multiple_of: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, size_divisor: Optional[int]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
if do_reduce_labels:
image = self.reduce_label(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=ensure_multiple_of, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
if do_pad:
image = self.pad_image(image=image, size_divisor=size_divisor, input_data_format=input_data_format)
return image
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, keep_aspect_ratio: Optional[bool]=None, ensure_multiple_of: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, size_divisor: Optional[int]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""Preprocesses a single image."""
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(image, do_reduce_labels=False, do_resize=do_resize, size=size, resample=resample, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=ensure_multiple_of, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisor=size_divisor, input_data_format=input_data_format)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_segmentation_map(self, segmentation_map: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, keep_aspect_ratio: Optional[bool]=None, ensure_multiple_of: Optional[int]=None, do_reduce_labels: Optional[bool]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
"""Preprocesses a single segmentation map."""
segmentation_map = to_numpy_array(segmentation_map)
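        # 2D maps get a temporary channel dimension so the shared _preprocess pipeline
        # can run on them; the extra axis is squeezed out again afterwards.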
if segmentation_map.ndim == 2:
segmentation_map = segmentation_map[None, ...]
added_dimension = True
input_data_format = ChannelDimension.FIRST
else:
added_dimension = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
segmentation_map = self._preprocess(image=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, size=size, resample=resample, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=ensure_multiple_of, do_normalize=False, do_rescale=False, input_data_format=input_data_format)
if added_dimension:
segmentation_map = np.squeeze(segmentation_map, axis=0)
segmentation_map = segmentation_map.astype(np.int64)
return segmentation_map
def __call__(self, images, segmentation_maps=None, **kwargs):
return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, do_resize: Optional[bool]=None, size: Optional[int]=None, keep_aspect_ratio: Optional[bool]=None, ensure_multiple_of: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, size_divisor: Optional[int]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
segmentation_maps (`ImageInput`, *optional*):
Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. If `keep_aspect_ratio` is `True`, the image is resized to the largest
possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is
resized to a size that is a multiple of this value.
keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`):
Whether to keep the aspect ratio of the image. If False, the image will be resized to (size, size). If
True, the image will be resized to keep the aspect ratio and the size will be the maximum possible.
ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`):
Ensure that the image size is a multiple of this value.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values to the [0, 1] range.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_pad = do_pad if do_pad is not None else self.do_pad
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
images = [self._preprocess_image(image=img, do_resize=do_resize, do_rescale=do_rescale, do_normalize=do_normalize, do_pad=do_pad, size=size, resample=resample, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=ensure_multiple_of, rescale_factor=rescale_factor, image_mean=image_mean, image_std=image_std, size_divisor=size_divisor, data_format=data_format, input_data_format=input_data_format) for img in images]
data = {'pixel_values': images}
if segmentation_maps is not None:
segmentation_maps = [self._preprocess_segmentation_map(segmentation_map=segmentation_map, do_reduce_labels=do_reduce_labels, do_resize=do_resize, size=size, resample=resample, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=ensure_multiple_of, input_data_format=input_data_format) for segmentation_map in segmentation_maps]
data['labels'] = segmentation_maps
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]]=None):
"""
Converts the output of [`DPTForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`DPTForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
"""
logits = outputs.logits
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
def post_process_depth_estimation(self, outputs: 'DepthEstimatorOutput', target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]]=None) -> list[dict[str, TensorType]]:
"""
Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images.
Only supports PyTorch.
Args:
outputs ([`DepthEstimatorOutput`]):
Raw outputs of the model.
target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
Returns:
`list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
predictions.
"""
requires_backends(self, 'torch')
predicted_depth = outputs.predicted_depth
if target_sizes is not None and len(predicted_depth) != len(target_sizes):
raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the predicted depth')
results = []
target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
for depth, target_size in zip(predicted_depth, target_sizes):
if target_size is not None:
depth = torch.nn.functional.interpolate(depth.unsqueeze(0).unsqueeze(1), size=target_size, mode='bicubic', align_corners=False).squeeze()
results.append({'predicted_depth': depth})
return results
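For reference, a minimal standalone sketch of the resize step inside `post_process_depth_estimation` above; the batch size, feature resolution, and target sizes are illustrative assumptions, and only `torch` is required:
```python
import torch

# illustrative predicted depth maps: (batch_size, height, width)
predicted_depth = torch.rand(2, 96, 128)
target_sizes = [(384, 512), (480, 640)]  # requested (height, width) per image

results = []
for depth, target_size in zip(predicted_depth, target_sizes):
    # add batch/channel dims -> (1, 1, h, w), resize bicubically, squeeze back
    depth = torch.nn.functional.interpolate(
        depth.unsqueeze(0).unsqueeze(1), size=target_size, mode="bicubic", align_corners=False
    ).squeeze()
    results.append({"predicted_depth": depth})

print([r["predicted_depth"].shape for r in results])
# [torch.Size([384, 512]), torch.Size([480, 640])]
```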
|
@requires(backends=('vision',))
class DPTImageProcessor(BaseImageProcessor):
'''
Constructs a DPT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions. Can be overridden by `do_resize` in `preprocess`.
size (`dict[str, int]`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the image after resizing. Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Defines the resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. Can
be overridden by `keep_aspect_ratio` in `preprocess`.
ensure_multiple_of (`int`, *optional*, defaults to 1):
If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Can be overridden
by `ensure_multiple_of` in `preprocess`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
`preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in `preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `False`):
Whether to apply center padding. This was introduced in the DINOv2 paper, which uses the model in
combination with DPT.
size_divisor (`int`, *optional*):
If `do_pad` is `True`, pads the image dimensions to be divisible by this value. This was introduced in the
DINOv2 paper, which uses the model in combination with DPT.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
`preprocess` method.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, keep_aspect_ratio: bool=False, ensure_multiple_of: int=1, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: bool=False, size_divisor: Optional[int]=None, do_reduce_labels: bool=False, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], keep_aspect_ratio: bool=False, ensure_multiple_of: int=1, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image
is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is
set, the image is resized to a size that is a multiple of this value.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Target size of the output image.
keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.
ensure_multiple_of (`int`, *optional*, defaults to 1):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Defines the resampling filter to use if resizing the image. Otherwise, the image is resized to size
specified in `size`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def pad_image(self, image: np.ndarray, size_divisor: int, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''
Center pad an image so that its dimensions are a multiple of `size_divisor`.
Args:
image (`np.ndarray`):
Image to pad.
size_divisor (`int`):
The width and height of the image will be padded to a multiple of this number.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def _get_pad(size, size_divisor):
pass
def reduce_label(self, label: ImageInput) -> np.ndarray:
pass
def _preprocess(self, image: ImageInput, do_reduce_labels: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, keep_aspect_ratio: Optional[bool]=None, ensure_multiple_of: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, size_divisor: Optional[int]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
pass
def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, keep_aspect_ratio: Optional[bool]=None, ensure_multiple_of: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, size_divisor: Optional[int]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''Preprocesses a single image.'''
pass
def _preprocess_segmentation_map(self, segmentation_map: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, keep_aspect_ratio: Optional[bool]=None, ensure_multiple_of: Optional[int]=None, do_reduce_labels: Optional[bool]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
'''Preprocesses a single segmentation map.'''
pass
def __call__(self, images, segmentation_maps=None, **kwargs):
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, do_resize: Optional[bool]=None, size: Optional[int]=None, keep_aspect_ratio: Optional[bool]=None, ensure_multiple_of: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, size_divisor: Optional[int]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
segmentation_maps (`ImageInput`, *optional*):
Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. If `keep_aspect_ratio` is `True`, the image is resized to the largest
possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is
resized to a size that is a multiple of this value.
keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`):
Whether to keep the aspect ratio of the image. If False, the image will be resized to (size, size). If
True, the image will be resized to keep the aspect ratio and the size will be the maximum possible.
ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`):
Ensure that the image size is a multiple of this value.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]]=None):
'''
Converts the output of [`DPTForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`DPTForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
'''
pass
def post_process_depth_estimation(self, outputs: 'DepthEstimatorOutput', target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]]=None) -> list[dict[str, TensorType]]:
'''
Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images.
Only supports PyTorch.
Args:
outputs ([`DepthEstimatorOutput`]):
Raw outputs of the model.
target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
Returns:
`list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
predictions.
'''
pass
| 15 | 8 | 43 | 3 | 29 | 11 | 4 | 0.52 | 1 | 10 | 2 | 1 | 11 | 13 | 11 | 31 | 570 | 50 | 342 | 146 | 228 | 178 | 136 | 45 | 123 | 17 | 3 | 2 | 52
|
2,258
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTAuxiliaryHead
|
from .configuration_dpt import DPTConfig
import torch
from torch import nn
class DPTAuxiliaryHead(nn.Module):
def __init__(self, config: DPTConfig):
super().__init__()
features = config.fusion_hidden_size
self.head = nn.Sequential(nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(features), nn.ReLU(), nn.Dropout(0.1, False), nn.Conv2d(features, config.num_labels, kernel_size=1))
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
logits = self.head(hidden_states)
return logits
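A minimal standalone sketch of the auxiliary head above, with assumed illustrative config values (`fusion_hidden_size=256`, `num_labels=150`, roughly ADE20k):
```python
import torch
from torch import nn

features, num_labels = 256, 150  # assumed config.fusion_hidden_size / config.num_labels
head = nn.Sequential(
    nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False),
    nn.BatchNorm2d(features),
    nn.ReLU(),
    nn.Dropout(0.1, False),
    nn.Conv2d(features, num_labels, kernel_size=1),
)

hidden_states = torch.rand(1, features, 60, 80)  # (batch, channels, height, width)
print(head(hidden_states).shape)  # torch.Size([1, 150, 60, 80]) -- spatial size preserved
```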
|
class DPTAuxiliaryHead(nn.Module):
def __init__(self, config: DPTConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 8 | 1 | 7 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 12 | 17 | 3 | 14 | 6 | 11 | 0 | 8 | 6 | 5 | 1 | 1 | 0 | 2
|
2,259
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTDepthEstimationHead
|
import torch
from torch import nn
from .configuration_dpt import DPTConfig
class DPTDepthEstimationHead(nn.Module):
"""
Output head consisting of 3 convolutional layers. It progressively halves the feature dimension and upsamples
the predictions to the input resolution after the first convolutional layer (details can be found in the paper's
supplementary material).
"""
def __init__(self, config: DPTConfig):
super().__init__()
self.config = config
self.projection = None
if config.add_projection:
self.projection = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
features = config.fusion_hidden_size
self.head = nn.Sequential(nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1), nn.ReLU(), nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), nn.ReLU())
def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor:
hidden_states = hidden_states[self.config.head_in_index]
if self.projection is not None:
hidden_states = self.projection(hidden_states)
hidden_states = nn.ReLU()(hidden_states)
predicted_depth = self.head(hidden_states)
predicted_depth = predicted_depth.squeeze(dim=1)
return predicted_depth
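A standalone sketch of the head's shape flow, with an assumed `fusion_hidden_size=256` and an illustrative input resolution:
```python
import torch
from torch import nn

features = 256  # assumed config.fusion_hidden_size
head = nn.Sequential(
    nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
    nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True),
    nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
    nn.ReLU(),
    nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
    nn.ReLU(),
)

fused = torch.rand(1, features, 96, 128)
depth = head(fused).squeeze(dim=1)  # drop the singleton channel dimension
print(depth.shape)  # torch.Size([1, 192, 256]) -- upsampled 2x after the first conv
```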
|
class DPTDepthEstimationHead(nn.Module):
'''
Output head consisting of 3 convolutional layers. It progressively halves the feature dimension and upsamples
the predictions to the input resolution after the first convolutional layer (details can be found in the paper's
supplementary material).
'''
def __init__(self, config: DPTConfig):
pass
def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor:
pass
| 3 | 1 | 16 | 4 | 12 | 1 | 2 | 0.25 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 39 | 9 | 24 | 8 | 21 | 6 | 17 | 8 | 14 | 2 | 1 | 1 | 4
|
2,260
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTFeatureFusionLayer
|
from .configuration_dpt import DPTConfig
from typing import Callable, Optional
import torch
from torch import nn
class DPTFeatureFusionLayer(nn.Module):
"""Feature fusion layer, merges feature maps from different stages.
Args:
config (`[DPTConfig]`):
Model configuration class defining the model architecture.
align_corners (`bool`, *optional*, defaults to `True`):
The align_corner setting for bilinear upsample.
"""
def __init__(self, config: DPTConfig, align_corners: bool=True):
super().__init__()
self.align_corners = align_corners
self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)
self.residual_layer1 = DPTPreActResidualLayer(config)
self.residual_layer2 = DPTPreActResidualLayer(config)
def forward(self, hidden_state: torch.Tensor, residual: Optional[torch.Tensor]=None) -> torch.Tensor:
if residual is not None:
if hidden_state.shape != residual.shape:
residual = nn.functional.interpolate(residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode='bilinear', align_corners=False)
hidden_state = hidden_state + self.residual_layer1(residual)
hidden_state = self.residual_layer2(hidden_state)
hidden_state = nn.functional.interpolate(hidden_state, scale_factor=2, mode='bilinear', align_corners=self.align_corners)
hidden_state = self.projection(hidden_state)
return hidden_state
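A sketch of the fusion layer's data flow, assuming `fusion_hidden_size=256` and a deliberately mismatched residual; the two pre-activation residual units are elided for brevity:
```python
import torch
from torch import nn

c = 256  # assumed config.fusion_hidden_size
hidden_state = torch.rand(1, c, 24, 32)
residual = torch.rand(1, c, 25, 33)  # finer-stage map with a mismatched spatial size

# 1) resize the residual to match, then merge (residual_layer1/2 elided here)
residual = nn.functional.interpolate(
    residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode="bilinear", align_corners=False
)
hidden_state = hidden_state + residual
# 2) upsample 2x before projection
hidden_state = nn.functional.interpolate(hidden_state, scale_factor=2, mode="bilinear", align_corners=True)
print(hidden_state.shape)  # torch.Size([1, 256, 48, 64])
```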
|
class DPTFeatureFusionLayer(nn.Module):
'''Feature fusion layer, merges feature maps from different stages.
Args:
config (`[DPTConfig]`):
Model configuration class defining the model architecture.
align_corners (`bool`, *optional*, defaults to `True`):
The align_corner setting for bilinear upsample.
'''
def __init__(self, config: DPTConfig, align_corners: bool=True):
pass
def forward(self, hidden_state: torch.Tensor, residual: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 3 | 1 | 12 | 3 | 10 | 0 | 2 | 0.35 | 1 | 2 | 1 | 0 | 2 | 4 | 2 | 12 | 35 | 8 | 20 | 7 | 17 | 7 | 16 | 7 | 13 | 3 | 1 | 2 | 4
|
2,261
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTFeatureFusionStage
|
from torch import nn
from .configuration_dpt import DPTConfig
class DPTFeatureFusionStage(nn.Module):
def __init__(self, config: DPTConfig):
super().__init__()
self.layers = nn.ModuleList()
for _ in range(len(config.neck_hidden_sizes)):
self.layers.append(DPTFeatureFusionLayer(config))
def forward(self, hidden_states):
hidden_states = hidden_states[::-1]
fused_hidden_states = []
fused_hidden_state = None
for hidden_state, layer in zip(hidden_states, self.layers):
if fused_hidden_state is None:
fused_hidden_state = layer(hidden_state)
else:
fused_hidden_state = layer(fused_hidden_state, hidden_state)
fused_hidden_states.append(fused_hidden_state)
return fused_hidden_states
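The fusion order can be illustrated with a plain-Python sketch: the stage reverses the backbone outputs so fusion starts from the coarsest map, then each layer merges the running result with the next finer map (strings stand in for feature tensors here):
```python
hidden_states = ["s1_fine", "s2", "s3", "s4_coarse"]  # backbone order: fine -> coarse

fused = None
for hs in hidden_states[::-1]:  # iterate coarse -> fine
    fused = hs if fused is None else f"fuse({fused}, {hs})"

print(fused)  # fuse(fuse(fuse(s4_coarse, s3), s2), s1_fine)
```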
|
class DPTFeatureFusionStage(nn.Module):
def __init__(self, config: DPTConfig):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 10 | 1 | 8 | 1 | 3 | 0.12 | 1 | 4 | 1 | 0 | 2 | 1 | 2 | 12 | 22 | 3 | 17 | 8 | 14 | 2 | 16 | 8 | 13 | 3 | 1 | 2 | 5
|
2,262
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTForDepthEstimation
|
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from ...utils.backbone_utils import load_backbone
from ...utils.generic import can_return_tuple, check_model_inputs
import torch
from typing import Callable, Optional
from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput, SemanticSegmenterOutput
@auto_docstring(custom_intro='\n DPT Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.\n ')
class DPTForDepthEstimation(DPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.backbone = None
if config.is_hybrid is False and (config.backbone_config is not None or config.backbone is not None):
self.backbone = load_backbone(config)
else:
self.dpt = DPTModel(config, add_pooling_layer=False)
self.neck = DPTNeck(config)
self.head = DPTDepthEstimationHead(config)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, **kwargs) -> DepthEstimatorOutput:
"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth depth estimation maps for computing the loss.
Examples:
```python
>>> from transformers import AutoImageProcessor, DPTForDepthEstimation
>>> import torch
>>> import numpy as np
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("Intel/dpt-large")
>>> model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # interpolate to original size
>>> post_processed_output = image_processor.post_process_depth_estimation(
... outputs,
... target_sizes=[(image.height, image.width)],
... )
>>> # visualize the prediction
>>> predicted_depth = post_processed_output[0]["predicted_depth"]
>>> depth = predicted_depth * 255 / predicted_depth.max()
>>> depth = depth.detach().cpu().numpy()
>>> depth = Image.fromarray(depth.astype("uint8"))
```"""
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
loss = None
if labels is not None:
raise NotImplementedError('Training is not implemented yet')
if self.backbone is not None:
outputs = self.backbone.forward_with_filtered_kwargs(pixel_values, output_hidden_states=True, **kwargs)
hidden_states = outputs.feature_maps
else:
outputs = self.dpt(pixel_values, head_mask=head_mask, output_hidden_states=True, **kwargs)
hidden_states = outputs.hidden_states
if not self.config.is_hybrid:
hidden_states = [feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices]
else:
backbone_hidden_states = outputs.intermediate_activations
backbone_hidden_states.extend((feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices[2:]))
hidden_states = backbone_hidden_states
patch_height, patch_width = (None, None)
if self.config.backbone_config is not None and self.config.is_hybrid is False:
_, _, height, width = pixel_values.shape
patch_size = self.config.backbone_config.patch_size
patch_height = height // patch_size
patch_width = width // patch_size
hidden_states = self.neck(hidden_states, patch_height, patch_width)
predicted_depth = self.head(hidden_states)
return DepthEstimatorOutput(loss=loss, predicted_depth=predicted_depth, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n DPT Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.\n ')
class DPTForDepthEstimation(DPTPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, **kwargs) -> DepthEstimatorOutput:
'''
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth depth estimation maps for computing the loss.
Examples:
```python
>>> from transformers import AutoImageProcessor, DPTForDepthEstimation
>>> import torch
>>> import numpy as np
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("Intel/dpt-large")
>>> model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> # interpolate to original size
>>> post_processed_output = image_processor.post_process_depth_estimation(
... outputs,
... target_sizes=[(image.height, image.width)],
... )
>>> # visualize the prediction
>>> predicted_depth = post_processed_output[0]["predicted_depth"]
>>> depth = predicted_depth * 255 / predicted_depth.max()
>>> depth = depth.detach().cpu().numpy()
>>> depth = Image.fromarray(depth.astype("uint8"))
```'''
pass
| 6 | 1 | 64 | 10 | 37 | 18 | 8 | 0.47 | 1 | 10 | 4 | 0 | 2 | 4 | 2 | 3 | 132 | 21 | 76 | 26 | 63 | 36 | 41 | 16 | 38 | 14 | 2 | 2 | 16
|
2,263
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTForSemanticSegmentation
|
import torch
from typing import Callable, Optional
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from torch.nn import CrossEntropyLoss
from .configuration_dpt import DPTConfig
from torch import nn
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput, SemanticSegmenterOutput
@auto_docstring
class DPTForSemanticSegmentation(DPTPreTrainedModel):
def __init__(self, config: DPTConfig):
super().__init__(config)
self.dpt = DPTModel(config, add_pooling_layer=False)
self.neck = DPTNeck(config)
self.head = DPTSemanticSegmentationHead(config)
self.auxiliary_head = DPTAuxiliaryHead(config) if config.use_auxiliary_head else None
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, **kwargs) -> SemanticSegmenterOutput:
"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, DPTForSemanticSegmentation
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("Intel/dpt-large-ade")
>>> model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```"""
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
if labels is not None and self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one')
outputs: BaseModelOutputWithPoolingAndIntermediateActivations = self.dpt(pixel_values, head_mask=head_mask, output_hidden_states=True, **kwargs)
hidden_states = outputs.hidden_states
if not self.config.is_hybrid:
hidden_states = [feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices]
else:
backbone_hidden_states = outputs.intermediate_activations
backbone_hidden_states.extend((feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices[2:]))
hidden_states = backbone_hidden_states
hidden_states = self.neck(hidden_states=hidden_states)
logits = self.head(hidden_states)
auxiliary_logits = None
if self.auxiliary_head is not None:
auxiliary_logits = self.auxiliary_head(hidden_states[-1])
loss = None
if labels is not None:
upsampled_logits = nn.functional.interpolate(logits, size=labels.shape[-2:], mode='bilinear', align_corners=False)
if auxiliary_logits is not None:
upsampled_auxiliary_logits = nn.functional.interpolate(auxiliary_logits, size=labels.shape[-2:], mode='bilinear', align_corners=False)
loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index)
main_loss = loss_fct(upsampled_logits, labels)
auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels)
loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
return SemanticSegmenterOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)
|
@auto_docstring
class DPTForSemanticSegmentation(DPTPreTrainedModel):
def __init__(self, config: DPTConfig):
pass
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, **kwargs) -> SemanticSegmenterOutput:
'''
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, DPTForSemanticSegmentation
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("Intel/dpt-large-ade")
>>> model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```'''
pass
| 6 | 1 | 58 | 11 | 35 | 13 | 8 | 0.36 | 1 | 11 | 5 | 0 | 2 | 4 | 2 | 3 | 120 | 22 | 73 | 29 | 60 | 26 | 40 | 19 | 37 | 14 | 2 | 2 | 16
|
2,264
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTModel
|
from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput, SemanticSegmenterOutput
from .configuration_dpt import DPTConfig
from ...utils.generic import can_return_tuple, check_model_inputs
import torch
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from typing import Callable, Optional
@auto_docstring
class DPTModel(DPTPreTrainedModel):
def __init__(self, config: DPTConfig, add_pooling_layer: bool=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
if config.is_hybrid:
self.embeddings = DPTViTHybridEmbeddings(config)
else:
self.embeddings = DPTViTEmbeddings(config)
self.encoder = DPTViTEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pooler = DPTViTPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
if self.config.is_hybrid:
return self.embeddings
else:
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, **kwargs) -> BaseModelOutputWithPoolingAndIntermediateActivations:
if output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output: BaseModelOutputWithIntermediateActivations = self.embeddings(pixel_values)
embedding_last_hidden_states = embedding_output.last_hidden_states
encoder_outputs: BaseModelOutput = self.encoder(embedding_last_hidden_states, head_mask=head_mask, output_hidden_states=output_hidden_states)
sequence_output = encoder_outputs.last_hidden_state
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPoolingAndIntermediateActivations(last_hidden_state=sequence_output, pooler_output=pooled_output, intermediate_activations=embedding_output.intermediate_activations, hidden_states=encoder_outputs.hidden_states)
|
@auto_docstring
class DPTModel(DPTPreTrainedModel):
def __init__(self, config: DPTConfig, add_pooling_layer: bool=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
'''
pass
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, **kwargs) -> BaseModelOutputWithPoolingAndIntermediateActivations:
pass
| 8 | 2 | 19 | 3 | 14 | 3 | 4 | 0.17 | 1 | 7 | 5 | 0 | 4 | 5 | 4 | 5 | 88 | 13 | 64 | 25 | 44 | 11 | 33 | 17 | 28 | 8 | 2 | 1 | 15
|
2,265
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTNeck
|
from typing import Callable, Optional
from torch import nn
from .configuration_dpt import DPTConfig
import torch
class DPTNeck(nn.Module):
"""
DPTNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as
input and produces another list of tensors as output. For DPT, it includes 2 stages:
* DPTReassembleStage
* DPTFeatureFusionStage.
Args:
config (dict): config dict.
"""
def __init__(self, config: DPTConfig):
super().__init__()
self.config = config
if config.backbone_config is not None and config.backbone_config.model_type in ['swinv2']:
self.reassemble_stage = None
else:
self.reassemble_stage = DPTReassembleStage(config)
self.convs = nn.ModuleList()
for channel in config.neck_hidden_sizes:
self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False))
self.fusion_stage = DPTFeatureFusionStage(config)
def forward(self, hidden_states: list[torch.Tensor], patch_height: Optional[int]=None, patch_width: Optional[int]=None) -> list[torch.Tensor]:
"""
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):
List of hidden states from the backbone.
"""
if not isinstance(hidden_states, (tuple, list)):
raise TypeError('hidden_states should be a tuple or list of tensors')
if len(hidden_states) != len(self.config.neck_hidden_sizes):
raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')
if self.reassemble_stage is not None:
hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)
features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]
output = self.fusion_stage(features)
return output
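A sketch of the neck's channel-unification step, with assumed values from a typical DPT config (`neck_hidden_sizes=[96, 192, 384, 768]`, `fusion_hidden_size=256`); the reassemble and fusion stages are elided:
```python
import torch
from torch import nn

neck_hidden_sizes, fusion_hidden_size = [96, 192, 384, 768], 256  # assumed config values
convs = nn.ModuleList(
    [nn.Conv2d(c, fusion_hidden_size, kernel_size=3, padding=1, bias=False) for c in neck_hidden_sizes]
)

# four reassembled feature maps at decreasing resolution (illustrative sizes)
hidden_states = [torch.rand(1, c, 96 // 2**i, 128 // 2**i) for i, c in enumerate(neck_hidden_sizes)]
features = [convs[i](f) for i, f in enumerate(hidden_states)]
print([tuple(f.shape) for f in features])
# every map now has fusion_hidden_size channels; spatial sizes are unchanged
```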
|
class DPTNeck(nn.Module):
'''
DPTNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as
input and produces another list of tensors as output. For DPT, it includes 2 stages:
* DPTReassembleStage
* DPTFeatureFusionStage.
Args:
config (dict): config dict.
'''
def __init__(self, config: DPTConfig):
pass
def forward(self, hidden_states: list[torch.Tensor], patch_height: Optional[int]=None, patch_width: Optional[int]=None) -> list[torch.Tensor]:
'''
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):
List of hidden states from the backbone.
'''
pass
| 3 | 2 | 19 | 4 | 11 | 5 | 4 | 0.77 | 1 | 9 | 2 | 0 | 2 | 4 | 2 | 12 | 51 | 12 | 22 | 10 | 19 | 17 | 21 | 10 | 18 | 4 | 1 | 1 | 7
|
2,266
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTPreActResidualLayer
|
from torch import nn
import torch
from .configuration_dpt import DPTConfig
class DPTPreActResidualLayer(nn.Module):
"""
ResidualConvUnit, pre-activate residual unit.
Args:
config (`[DPTConfig]`):
Model configuration class defining the model architecture.
"""
def __init__(self, config: DPTConfig):
super().__init__()
self.use_batch_norm = config.use_batch_norm_in_fusion_residual
use_bias_in_fusion_residual = config.use_bias_in_fusion_residual if config.use_bias_in_fusion_residual is not None else not self.use_batch_norm
self.activation1 = nn.ReLU()
self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)
self.activation2 = nn.ReLU()
self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)
if self.use_batch_norm:
self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size)
self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
residual = hidden_state
hidden_state = self.activation1(hidden_state)
hidden_state = self.convolution1(hidden_state)
if self.use_batch_norm:
hidden_state = self.batch_norm1(hidden_state)
hidden_state = self.activation2(hidden_state)
hidden_state = self.convolution2(hidden_state)
if self.use_batch_norm:
hidden_state = self.batch_norm2(hidden_state)
return hidden_state + residual
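With batch norm disabled, the unit reduces to `x + conv2(relu(conv1(relu(x))))`; a standalone sketch with an assumed `fusion_hidden_size=256`:
```python
import torch
from torch import nn

c = 256  # assumed config.fusion_hidden_size
conv1 = nn.Conv2d(c, c, kernel_size=3, stride=1, padding=1, bias=True)
conv2 = nn.Conv2d(c, c, kernel_size=3, stride=1, padding=1, bias=True)

x = torch.rand(1, c, 48, 64)
out = x + conv2(nn.functional.relu(conv1(nn.functional.relu(x))))
print(out.shape)  # torch.Size([1, 256, 48, 64]) -- shape preserved by the residual unit
```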
|
class DPTPreActResidualLayer(nn.Module):
'''
ResidualConvUnit, pre-activate residual unit.
Args:
config (`[DPTConfig]`):
Model configuration class defining the model architecture.
'''
def __init__(self, config: DPTConfig):
pass
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
pass
| 3 | 1 | 25 | 5 | 20 | 0 | 3 | 0.15 | 1 | 2 | 0 | 0 | 2 | 7 | 2 | 12 | 59 | 12 | 41 | 12 | 38 | 6 | 23 | 12 | 20 | 3 | 1 | 1 | 6
|
2,267
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTPreTrainedModel
|
from .configuration_dpt import DPTConfig
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from torch import nn
@auto_docstring
class DPTPreTrainedModel(PreTrainedModel):
config: DPTConfig
base_model_prefix = 'dpt'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'attentions': DPTSelfAttention}
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, (DPTViTEmbeddings, DPTViTHybridEmbeddings)):
module.cls_token.data.zero_()
module.position_embeddings.data.zero_()
|
@auto_docstring
class DPTPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3 | 1 | 11 | 0 | 8 | 3 | 4 | 0.54 | 1 | 0 | 0 | 3 | 1 | 0 | 1 | 1 | 22 | 2 | 13 | 6 | 11 | 7 | 12 | 6 | 10 | 4 | 1 | 2 | 4
|
2,268
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTReassembleLayer
|
from .configuration_dpt import DPTConfig
from torch import nn
class DPTReassembleLayer(nn.Module):
def __init__(self, config: DPTConfig, channels: int, factor: int):
super().__init__()
hidden_size = _get_backbone_hidden_size(config)
self.projection = nn.Conv2d(in_channels=hidden_size, out_channels=channels, kernel_size=1)
if factor > 1:
self.resize = nn.ConvTranspose2d(channels, channels, kernel_size=factor, stride=factor, padding=0)
elif factor == 1:
self.resize = nn.Identity()
elif factor < 1:
self.resize = nn.Conv2d(channels, channels, kernel_size=3, stride=int(1 / factor), padding=1)
def forward(self, hidden_state):
hidden_state = self.projection(hidden_state)
hidden_state = self.resize(hidden_state)
return hidden_state
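The three `factor` branches behave differently; a sketch with illustrative factors (4, 1, 0.5) and 256 channels:
```python
import torch
from torch import nn

channels = 256
x = torch.rand(1, channels, 24, 32)

up = nn.ConvTranspose2d(channels, channels, kernel_size=4, stride=4, padding=0)  # factor = 4
same = nn.Identity()                                                             # factor = 1
down = nn.Conv2d(channels, channels, kernel_size=3, stride=2, padding=1)         # factor = 0.5

print(up(x).shape)    # torch.Size([1, 256, 96, 128]) -- 4x upsampled
print(same(x).shape)  # torch.Size([1, 256, 24, 32])  -- unchanged
print(down(x).shape)  # torch.Size([1, 256, 12, 16])  -- 2x downsampled
```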
|
class DPTReassembleLayer(nn.Module):
def __init__(self, config: DPTConfig, channels: int, factor: int):
pass
def forward(self, hidden_state):
pass
| 3 | 0 | 9 | 1 | 7 | 2 | 3 | 0.2 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 20 | 2 | 15 | 6 | 12 | 3 | 13 | 6 | 10 | 4 | 1 | 1 | 5
|
2,269
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTReassembleStage
|
from ...activations import ACT2FN
from ...utils import ModelOutput, auto_docstring, logging, torch_int
import torch
from torch import nn
class DPTReassembleStage(nn.Module):
"""
This class reassembles the hidden states of the backbone into image-like feature representations at various
resolutions.
This happens in 3 stages:
1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to
`config.readout_type`.
2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.
3. Resize the spatial dimensions (height, width).
Args:
config (`[DPTConfig]`):
Model configuration class defining the model architecture.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.layers = nn.ModuleList()
if config.is_hybrid:
self._init_reassemble_dpt_hybrid(config)
else:
self._init_reassemble_dpt(config)
self.neck_ignore_stages = config.neck_ignore_stages
def _init_reassemble_dpt_hybrid(self, config):
""" "
For DPT-Hybrid the first 2 reassemble layers are set to `nn.Identity()`, please check the official
implementation: https://github.com/isl-org/DPT/blob/f43ef9e08d70a752195028a51be5e1aff227b913/dpt/vit.py#L438
for more details.
"""
for i, factor in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors):
if i <= 1:
self.layers.append(nn.Identity())
elif i > 1:
self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor))
if config.readout_type != 'project':
raise ValueError(f'Readout type {config.readout_type} is not supported for DPT-Hybrid.')
self.readout_projects = nn.ModuleList()
hidden_size = _get_backbone_hidden_size(config)
for i in range(len(config.neck_hidden_sizes)):
if i <= 1:
self.readout_projects.append(nn.Sequential(nn.Identity()))
elif i > 1:
self.readout_projects.append(nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]))
def _init_reassemble_dpt(self, config):
for i, factor in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors):
self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor))
if config.readout_type == 'project':
self.readout_projects = nn.ModuleList()
hidden_size = _get_backbone_hidden_size(config)
for _ in range(len(config.neck_hidden_sizes)):
self.readout_projects.append(nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]))
def forward(self, hidden_states: list[torch.Tensor], patch_height=None, patch_width=None) -> list[torch.Tensor]:
"""
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):
List of hidden states from the backbone.
"""
out = []
for i, hidden_state in enumerate(hidden_states):
if i not in self.neck_ignore_stages:
cls_token, hidden_state = (hidden_state[:, 0], hidden_state[:, 1:])
batch_size, sequence_length, num_channels = hidden_state.shape
if patch_height is not None and patch_width is not None:
hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)
else:
size = torch_int(sequence_length ** 0.5)
hidden_state = hidden_state.reshape(batch_size, size, size, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_shape = hidden_state.shape
if self.config.readout_type == 'project':
hidden_state = hidden_state.flatten(2).permute((0, 2, 1))
readout = cls_token.unsqueeze(1).expand_as(hidden_state)
hidden_state = self.readout_projects[i](torch.cat((hidden_state, readout), -1))
hidden_state = hidden_state.permute(0, 2, 1).reshape(feature_shape)
elif self.config.readout_type == 'add':
hidden_state = hidden_state.flatten(2) + cls_token.unsqueeze(-1)
hidden_state = hidden_state.reshape(feature_shape)
hidden_state = self.layers[i](hidden_state)
out.append(hidden_state)
return out
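The core token-to-feature-map reshape in the forward above can be sketched in isolation (illustrative sizes: `hidden_size=768`, a 24x32 patch grid):
```python
import torch

batch_size, num_channels = 2, 768   # illustrative batch and hidden size
patch_height, patch_width = 24, 32  # illustrative patch grid
hidden_state = torch.rand(batch_size, 1 + patch_height * patch_width, num_channels)

# drop the readout ([CLS]) token, then fold the remaining tokens back into a 2D grid
cls_token, hidden_state = hidden_state[:, 0], hidden_state[:, 1:]
hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
print(hidden_state.shape)  # torch.Size([2, 768, 24, 32])
```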
|
class DPTReassembleStage(nn.Module):
'''
This class reassembles the hidden states of the backbone into image-like feature representations at various
resolutions.
This happens in 3 stages:
1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to
`config.readout_type`.
2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.
3. Resize the spatial dimensions (height, width).
Args:
config (`[DPTConfig]`):
Model configuration class defining the model architecture.
'''
def __init__(self, config):
pass
def _init_reassemble_dpt_hybrid(self, config):
'''
For DPT-Hybrid the first 2 reassemble layers are set to `nn.Identity()`, please check the official
implementation: https://github.com/isl-org/DPT/blob/f43ef9e08d70a752195028a51be5e1aff227b913/dpt/vit.py#L438
for more details.
'''
pass
def _init_reassemble_dpt(self, config):
pass
def forward(self, hidden_states: list[torch.Tensor], patch_height=None, patch_width=None) -> list[torch.Tensor]:
'''
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):
List of hidden states from the backbone.
'''
pass
| 5 | 3 | 21 | 2 | 15 | 4 | 5 | 0.44 | 1 | 7 | 1 | 0 | 4 | 4 | 4 | 14 | 102 | 14 | 61 | 21 | 56 | 27 | 52 | 21 | 47 | 8 | 1 | 3 | 20
|
2,270
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTSemanticSegmentationHead
|
import torch
from .configuration_dpt import DPTConfig
from torch import nn
class DPTSemanticSegmentationHead(nn.Module):
def __init__(self, config: DPTConfig):
super().__init__()
self.config = config
features = config.fusion_hidden_size
self.head = nn.Sequential(nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(features), nn.ReLU(), nn.Dropout(config.semantic_classifier_dropout), nn.Conv2d(features, config.num_labels, kernel_size=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True))
def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor:
hidden_states = hidden_states[self.config.head_in_index]
logits = self.head(hidden_states)
return logits
|
class DPTSemanticSegmentationHead(nn.Module):
def __init__(self, config: DPTConfig):
pass
def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor:
pass
| 3 | 0 | 11 | 2 | 8 | 1 | 1 | 0.06 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 12 | 23 | 5 | 17 | 7 | 14 | 1 | 10 | 7 | 7 | 1 | 1 | 0 | 2
|
2,271
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTViTAttention
|
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from typing import Callable, Optional
import torch
from .configuration_dpt import DPTConfig
from torch import nn
class DPTViTAttention(nn.Module):
def __init__(self, config: DPTConfig):
super().__init__()
self.attention = DPTSelfAttention(config)
self.output = DPTViTSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads: set[int]):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
self_attn_output, _ = self.attention(hidden_states, head_mask)
output = self.output(self_attn_output, hidden_states)
return output
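`prune_linear_layer` essentially index-selects the weight rows (or columns, for `dim=1`) belonging to the kept heads; a standalone sketch with toy sizes (4 heads of size 16, pruning head 1):
```python
import torch
from torch import nn

num_heads, head_size = 4, 16
all_head_size = num_heads * head_size
query = nn.Linear(all_head_size, all_head_size)

keep_heads = [0, 2, 3]  # prune head 1
index = torch.cat([torch.arange(h * head_size, (h + 1) * head_size) for h in keep_heads])

pruned = nn.Linear(all_head_size, len(index))
pruned.weight = nn.Parameter(query.weight[index].clone())  # keep only rows of the kept heads
pruned.bias = nn.Parameter(query.bias[index].clone())
print(pruned.weight.shape)  # torch.Size([48, 64])
```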
|
class DPTViTAttention(nn.Module):
def __init__(self, config: DPTConfig):
pass
def prune_heads(self, heads: set[int]):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 4 | 0 | 11 | 1 | 9 | 1 | 1 | 0.17 | 1 | 8 | 3 | 0 | 3 | 3 | 3 | 13 | 39 | 6 | 29 | 16 | 20 | 5 | 22 | 11 | 18 | 2 | 1 | 1 | 4
|
2,272
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTViTEmbeddings
|
from torch import nn
from ...utils import ModelOutput, auto_docstring, logging, torch_int
import torch
class DPTViTEmbeddings(nn.Module):
"""
Construct the CLS token, position and patch embeddings.
"""
def __init__(self, config):
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.patch_embeddings = DPTViTPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.config = config
def _resize_pos_embed(self, posemb, grid_size_height, grid_size_width, start_index=1):
posemb_tok = posemb[:, :start_index]
posemb_grid = posemb[0, start_index:]
old_grid_size = torch_int(posemb_grid.size(0) ** 0.5)
posemb_grid = posemb_grid.reshape(1, old_grid_size, old_grid_size, -1).permute(0, 3, 1, 2)
posemb_grid = nn.functional.interpolate(posemb_grid, size=(grid_size_height, grid_size_width), mode='bilinear')
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, grid_size_height * grid_size_width, -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
def forward(self, pixel_values: torch.Tensor) -> BaseModelOutputWithIntermediateActivations:
batch_size, num_channels, height, width = pixel_values.shape
patch_size = self.config.patch_size
position_embeddings = self._resize_pos_embed(self.position_embeddings, height // patch_size, width // patch_size)
embeddings = self.patch_embeddings(pixel_values)
batch_size, seq_len, _ = embeddings.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
embeddings = embeddings + position_embeddings
embeddings = self.dropout(embeddings)
return BaseModelOutputWithIntermediateActivations(last_hidden_states=embeddings)
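`_resize_pos_embed` keeps the CLS slot, reinterprets the grid part as 2D, bilinearly resizes it, and flattens back; a standalone sketch with an assumed `hidden_size=768`, an old 24x24 grid, and a new 30x40 grid:
```python
import torch
from torch import nn

hidden_size, old = 768, 24  # assumed hidden size and original grid side
posemb = torch.rand(1, 1 + old * old, hidden_size)

posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]  # CLS slot vs. grid part
posemb_grid = posemb_grid.reshape(1, old, old, -1).permute(0, 3, 1, 2)
posemb_grid = nn.functional.interpolate(posemb_grid, size=(30, 40), mode="bilinear")
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, 30 * 40, -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
print(posemb.shape)  # torch.Size([1, 1201, 768]) -- 1 CLS slot + 1200 grid positions
```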
|
class DPTViTEmbeddings(nn.Module):
'''
Construct the CLS token, position and patch embeddings.
'''
def __init__(self, config):
pass
def _resize_pos_embed(self, posemb, grid_size_height, grid_size_width, start_index=1):
pass
def forward(self, pixel_values: torch.Tensor) -> BaseModelOutputWithIntermediateActivations:
pass
| 4 | 1 | 16 | 4 | 11 | 1 | 1 | 0.18 | 1 | 3 | 2 | 0 | 3 | 5 | 3 | 13 | 56 | 17 | 33 | 19 | 29 | 6 | 31 | 19 | 27 | 2 | 1 | 1 | 4
|
2,273
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTViTEncoder
|
from .configuration_dpt import DPTConfig
from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput, SemanticSegmenterOutput
from typing import Callable, Optional
import torch
from torch import nn
class DPTViTEncoder(nn.Module):
def __init__(self, config: DPTConfig):
super().__init__()
self.config = config
self.layer = nn.ModuleList([DPTViTLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_hidden_states: bool=False) -> BaseModelOutput:
all_hidden_states = [hidden_states] if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
layer_head_mask = head_mask[i] if head_mask is not None else None
hidden_states = layer_module(hidden_states, layer_head_mask)
if all_hidden_states:
all_hidden_states.append(hidden_states)
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=tuple(all_hidden_states) if all_hidden_states else None)
|
class DPTViTEncoder(nn.Module):
def __init__(self, config: DPTConfig):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, output_hidden_states: bool=False) -> BaseModelOutput:
pass
| 3 | 0 | 24 | 4 | 20 | 0 | 6 | 0 | 1 | 9 | 3 | 0 | 2 | 3 | 2 | 12 | 49 | 8 | 41 | 18 | 31 | 0 | 24 | 11 | 21 | 10 | 1 | 2 | 11
|
2,274
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTViTHybridEmbeddings
|
from torch import nn
from ...utils.backbone_utils import load_backbone
import torch
from .configuration_dpt import DPTConfig
from ...utils import ModelOutput, auto_docstring, logging, torch_int
import collections.abc
from typing import Callable, Optional
class DPTViTHybridEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config: DPTConfig, feature_size: Optional[tuple[int, int]]=None):
super().__init__()
image_size, patch_size = (config.image_size, config.patch_size)
num_channels, hidden_size = (config.num_channels, config.hidden_size)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.backbone = load_backbone(config)
feature_dim = self.backbone.channels[-1]
if len(self.backbone.channels) != 3:
raise ValueError(f'Expected backbone to have 3 output features, got {len(self.backbone.channels)}')
self.residual_feature_map_index = [0, 1]
if feature_size is None:
feat_map_shape = config.backbone_featmap_shape
feature_size = feat_map_shape[-2:]
feature_dim = feat_map_shape[1]
else:
feature_size = feature_size if isinstance(feature_size, collections.abc.Iterable) else (feature_size, feature_size)
feature_dim = self.backbone.channels[-1]
self.image_size = image_size
self.patch_size = patch_size[0]
self.num_channels = num_channels
self.projection = nn.Conv2d(feature_dim, hidden_size, kernel_size=1)
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
def _resize_pos_embed(self, posemb, grid_size_height, grid_size_width, start_index=1):
posemb_tok = posemb[:, :start_index]
posemb_grid = posemb[0, start_index:]
old_grid_size = torch_int(len(posemb_grid) ** 0.5)
posemb_grid = posemb_grid.reshape(1, old_grid_size, old_grid_size, -1).permute(0, 3, 1, 2)
posemb_grid = nn.functional.interpolate(posemb_grid, size=(grid_size_height, grid_size_width), mode='bilinear')
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, grid_size_height * grid_size_width, -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
return posemb
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> BaseModelOutputWithIntermediateActivations:
batch_size, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
if not interpolate_pos_encoding:
if height != self.image_size[0] or width != self.image_size[1]:
raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).")
position_embeddings = self._resize_pos_embed(self.position_embeddings, height // self.patch_size, width // self.patch_size)
backbone_output = self.backbone(pixel_values)
features = backbone_output.feature_maps[-1]
output_hidden_states = [backbone_output.feature_maps[index] for index in self.residual_feature_map_index]
embeddings = self.projection(features).flatten(2).transpose(1, 2)
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
embeddings = embeddings + position_embeddings
return BaseModelOutputWithIntermediateActivations(last_hidden_states=embeddings, intermediate_activations=output_hidden_states)
|
class DPTViTHybridEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config: DPTConfig, feature_size: Optional[tuple[int, int]]=None):
pass
def _resize_pos_embed(self, posemb, grid_size_height, grid_size_width, start_index=1):
pass
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> BaseModelOutputWithIntermediateActivations:
pass
| 4
| 1
| 29
| 6
| 22
| 1
| 4
| 0.13
| 1
| 6
| 1
| 0
| 3
| 8
| 3
| 13
| 97
| 22
| 67
| 29
| 61
| 9
| 52
| 27
| 48
| 6
| 1
| 2
| 12
|
2,275
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTViTIntermediate
|
import torch
from .configuration_dpt import DPTConfig
from torch import nn
from ...activations import ACT2FN
class DPTViTIntermediate(nn.Module):
def __init__(self, config: DPTConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class DPTViTIntermediate(nn.Module):
def __init__(self, config: DPTConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 1
| 6
| 0
| 2
| 0
| 1
| 4
| 1
| 0
| 2
| 2
| 2
| 12
| 14
| 2
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
2,276
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTViTLayer
|
from torch import nn
import torch
from typing import Callable, Optional
from ...modeling_layers import GradientCheckpointingLayer
from .configuration_dpt import DPTConfig
class DPTViTLayer(GradientCheckpointingLayer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config: DPTConfig):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = DPTViTAttention(config)
self.intermediate = DPTViTIntermediate(config)
self.output = DPTViTOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
hidden_states_norm = self.layernorm_before(hidden_states)
attention_output = self.attention(hidden_states_norm, head_mask)
hidden_states = attention_output + hidden_states
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output, hidden_states)
return layer_output
|
class DPTViTLayer(GradientCheckpointingLayer):
'''This corresponds to the Block class in the timm implementation.'''
def __init__(self, config: DPTConfig):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 3
| 1
| 18
| 3
| 14
| 3
| 1
| 0.21
| 1
| 7
| 4
| 0
| 2
| 7
| 2
| 12
| 40
| 7
| 29
| 19
| 21
| 6
| 20
| 14
| 17
| 1
| 1
| 0
| 2
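`DPTViTLayer` follows the pre-norm ViT layout: normalization is applied before each sub-block and the un-normalized input carries the residual. A toy sketch of that wiring, with a single `nn.Linear` as a hypothetical stand-in for attention or the MLP:
```python
import torch
from torch import nn

hidden_states = torch.randn(1, 5, 8)
layernorm = nn.LayerNorm(8)
sublayer = nn.Linear(8, 8)  # hypothetical stand-in for attention / MLP

# Pre-norm: normalize first, then add the raw input back as the residual.
output = hidden_states + sublayer(layernorm(hidden_states))
print(output.shape)  # torch.Size([1, 5, 8])
```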
|
2,277
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTViTOutput
|
import torch
from .configuration_dpt import DPTConfig
from torch import nn
class DPTViTOutput(nn.Module):
def __init__(self, config: DPTConfig):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
|
class DPTViTOutput(nn.Module):
def __init__(self, config: DPTConfig):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 1
| 5
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 2
| 2
| 2
| 12
| 13
| 3
| 10
| 5
| 7
| 0
| 10
| 5
| 7
| 1
| 1
| 0
| 2
|
2,278
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTViTPatchEmbeddings
|
import torch
from .configuration_dpt import DPTConfig
import collections.abc
from torch import nn
class DPTViTPatchEmbeddings(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(self, config: DPTConfig):
super().__init__()
image_size, patch_size = (config.image_size, config.patch_size)
num_channels, hidden_size = (config.num_channels, config.hidden_size)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
return embeddings
|
class DPTViTPatchEmbeddings(nn.Module):
'''
Image to Patch Embedding.
'''
def __init__(self, config: DPTConfig):
pass
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 11
| 1
| 10
| 0
| 3
| 0.14
| 1
| 3
| 0
| 0
| 2
| 5
| 2
| 12
| 29
| 5
| 21
| 13
| 18
| 3
| 19
| 13
| 16
| 3
| 1
| 1
| 5
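The patch count `(image_size // patch_size) ** 2` and the token layout fall out of the strided convolution in `DPTViTPatchEmbeddings`; a sketch with hypothetical 224x224 inputs and 16x16 patches:
```python
import torch
from torch import nn

projection = nn.Conv2d(3, 768, kernel_size=16, stride=16)
pixel_values = torch.randn(1, 3, 224, 224)

# (1, 768, 14, 14) -> flatten the grid -> (1, 196, 768) token sequence.
embeddings = projection(pixel_values).flatten(2).transpose(1, 2)
print(embeddings.shape)  # torch.Size([1, 196, 768]); 196 = (224 // 16) ** 2
```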
|
2,279
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTViTPooler
|
from ...activations import ACT2FN
from torch import nn
from .configuration_dpt import DPTConfig
import torch
class DPTViTPooler(nn.Module):
def __init__(self, config: DPTConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.pooler_output_size)
self.activation = ACT2FN[config.pooler_act]
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class DPTViTPooler(nn.Module):
def __init__(self, config: DPTConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 2
| 1
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 10
| 7
| 7
| 2
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
2,280
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/dpt/modeling_dpt.py
|
transformers.models.dpt.modeling_dpt.DPTViTSelfOutput
|
import torch
from .configuration_dpt import DPTConfig
from torch import nn
class DPTViTSelfOutput(nn.Module):
"""
The residual connection is defined in ViTLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: DPTConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class DPTViTSelfOutput(nn.Module):
'''
The residual connection is defined in ViTLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
'''
def __init__(self, config: DPTConfig):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 5
| 1
| 4
| 0
| 1
| 0.44
| 1
| 3
| 1
| 0
| 2
| 2
| 2
| 12
| 16
| 3
| 9
| 5
| 6
| 4
| 9
| 5
| 6
| 1
| 1
| 0
| 2
|
2,281
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/configuration_efficientnet.py
|
transformers.models.efficientnet.configuration_efficientnet.EfficientNetConfig
|
from ...configuration_utils import PretrainedConfig
class EfficientNetConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`EfficientNetModel`]. It is used to instantiate an
EfficientNet model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the EfficientNet
[google/efficientnet-b7](https://huggingface.co/google/efficientnet-b7) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 600):
The input image size.
width_coefficient (`float`, *optional*, defaults to 2.0):
Scaling coefficient for network width at each stage.
depth_coefficient (`float`, *optional*, defaults to 3.1):
Scaling coefficient for network depth at each stage.
depth_divisor (`int`, *optional*, defaults to 8):
A unit of network width.
kernel_sizes (`list[int]`, *optional*, defaults to `[3, 3, 5, 3, 5, 5, 3]`):
List of kernel sizes to be used in each block.
in_channels (`list[int]`, *optional*, defaults to `[32, 16, 24, 40, 80, 112, 192]`):
List of input channel sizes to be used in each block for convolutional layers.
out_channels (`list[int]`, *optional*, defaults to `[16, 24, 40, 80, 112, 192, 320]`):
List of output channel sizes to be used in each block for convolutional layers.
depthwise_padding (`list[int]`, *optional*, defaults to `[]`):
List of block indices with square padding.
strides (`list[int]`, *optional*, defaults to `[1, 2, 2, 2, 1, 2, 1]`):
List of stride sizes to be used in each block for convolutional layers.
num_block_repeats (`list[int]`, *optional*, defaults to `[1, 2, 2, 3, 3, 4, 1]`):
List of the number of times each block is to be repeated.
expand_ratios (`list[int]`, *optional*, defaults to `[1, 6, 6, 6, 6, 6, 6]`):
List of scaling coefficient of each block.
squeeze_expansion_ratio (`float`, *optional*, defaults to 0.25):
Squeeze expansion ratio.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
`"selu", `"gelu_new"`, `"silu"` and `"mish"` are supported.
hidden_dim (`int`, *optional*, defaults to 1280):
The hidden dimension of the layer before the classification head.
pooling_type (`str` or `function`, *optional*, defaults to `"mean"`):
Type of final pooling to be applied before the dense classification head. Available options are [`"mean"`,
`"max"`]
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
batch_norm_eps (`float`, *optional*, defaults to 1e-3):
The epsilon used by the batch normalization layers.
batch_norm_momentum (`float`, *optional*, defaults to 0.99):
The momentum used by the batch normalization layers.
dropout_rate (`float`, *optional*, defaults to 0.5):
The dropout rate to be applied before final classifier layer.
drop_connect_rate (`float`, *optional*, defaults to 0.2):
The drop rate for skip connections.
Example:
```python
>>> from transformers import EfficientNetConfig, EfficientNetModel
>>> # Initializing a EfficientNet efficientnet-b7 style configuration
>>> configuration = EfficientNetConfig()
>>> # Initializing a model (with random weights) from the efficientnet-b7 style configuration
>>> model = EfficientNetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'efficientnet'
def __init__(self, num_channels: int=3, image_size: int=600, width_coefficient: float=2.0, depth_coefficient: float=3.1, depth_divisor: int=8, kernel_sizes: list[int]=[3, 3, 5, 3, 5, 5, 3], in_channels: list[int]=[32, 16, 24, 40, 80, 112, 192], out_channels: list[int]=[16, 24, 40, 80, 112, 192, 320], depthwise_padding: list[int]=[], strides: list[int]=[1, 2, 2, 2, 1, 2, 1], num_block_repeats: list[int]=[1, 2, 2, 3, 3, 4, 1], expand_ratios: list[int]=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float=0.25, hidden_act: str='swish', hidden_dim: int=2560, pooling_type: str='mean', initializer_range: float=0.02, batch_norm_eps: float=0.001, batch_norm_momentum: float=0.99, dropout_rate: float=0.5, drop_connect_rate: float=0.2, **kwargs):
super().__init__(**kwargs)
self.num_channels = num_channels
self.image_size = image_size
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.depth_divisor = depth_divisor
self.kernel_sizes = kernel_sizes
self.in_channels = in_channels
self.out_channels = out_channels
self.depthwise_padding = depthwise_padding
self.strides = strides
self.num_block_repeats = num_block_repeats
self.expand_ratios = expand_ratios
self.squeeze_expansion_ratio = squeeze_expansion_ratio
self.hidden_act = hidden_act
self.hidden_dim = hidden_dim
self.pooling_type = pooling_type
self.initializer_range = initializer_range
self.batch_norm_eps = batch_norm_eps
self.batch_norm_momentum = batch_norm_momentum
self.dropout_rate = dropout_rate
self.drop_connect_rate = drop_connect_rate
self.num_hidden_layers = sum(num_block_repeats) * 4
|
class EfficientNetConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`EfficientNetModel`]. It is used to instantiate an
EfficientNet model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the EfficientNet
[google/efficientnet-b7](https://huggingface.co/google/efficientnet-b7) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 600):
The input image size.
width_coefficient (`float`, *optional*, defaults to 2.0):
Scaling coefficient for network width at each stage.
depth_coefficient (`float`, *optional*, defaults to 3.1):
Scaling coefficient for network depth at each stage.
depth_divisor (`int`, *optional*, defaults to 8):
A unit of network width.
kernel_sizes (`list[int]`, *optional*, defaults to `[3, 3, 5, 3, 5, 5, 3]`):
List of kernel sizes to be used in each block.
in_channels (`list[int]`, *optional*, defaults to `[32, 16, 24, 40, 80, 112, 192]`):
List of input channel sizes to be used in each block for convolutional layers.
out_channels (`list[int]`, *optional*, defaults to `[16, 24, 40, 80, 112, 192, 320]`):
List of output channel sizes to be used in each block for convolutional layers.
depthwise_padding (`list[int]`, *optional*, defaults to `[]`):
List of block indices with square padding.
strides (`list[int]`, *optional*, defaults to `[1, 2, 2, 2, 1, 2, 1]`):
List of stride sizes to be used in each block for convolutional layers.
num_block_repeats (`list[int]`, *optional*, defaults to `[1, 2, 2, 3, 3, 4, 1]`):
List of the number of times each block is to be repeated.
expand_ratios (`list[int]`, *optional*, defaults to `[1, 6, 6, 6, 6, 6, 6]`):
List of scaling coefficient of each block.
squeeze_expansion_ratio (`float`, *optional*, defaults to 0.25):
Squeeze expansion ratio.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
`"selu", `"gelu_new"`, `"silu"` and `"mish"` are supported.
hidden_dim (`int`, *optional*, defaults to 1280):
The hidden dimension of the layer before the classification head.
pooling_type (`str` or `function`, *optional*, defaults to `"mean"`):
Type of final pooling to be applied before the dense classification head. Available options are [`"mean"`,
`"max"`]
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
batch_norm_eps (`float`, *optional*, defaults to 1e-3):
The epsilon used by the batch normalization layers.
batch_norm_momentum (`float`, *optional*, defaults to 0.99):
The momentum used by the batch normalization layers.
dropout_rate (`float`, *optional*, defaults to 0.5):
The dropout rate to be applied before final classifier layer.
drop_connect_rate (`float`, *optional*, defaults to 0.2):
The drop rate for skip connections.
Example:
```python
>>> from transformers import EfficientNetConfig, EfficientNetModel
>>> # Initializing a EfficientNet efficientnet-b7 style configuration
>>> configuration = EfficientNetConfig()
>>> # Initializing a model (with random weights) from the efficientnet-b7 style configuration
>>> model = EfficientNetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, num_channels: int=3, image_size: int=600, width_coefficient: float=2.0, depth_coefficient: float=3.1, depth_divisor: int=8, kernel_sizes: list[int]=[3, 3, 5, 3, 5, 5, 3], in_channels: list[int]=[32, 16, 24, 40, 80, 112, 192], out_channels: list[int]=[16, 24, 40, 80, 112, 192, 320], depthwise_padding: list[int]=[], strides: list[int]=[1, 2, 2, 2, 1, 2, 1], num_block_repeats: list[int]=[1, 2, 2, 3, 3, 4, 1], expand_ratios: list[int]=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float=0.25, hidden_act: str='swish', hidden_dim: int=2560, pooling_type: str='mean', initializer_range: float=0.02, batch_norm_eps: float=0.001, batch_norm_momentum: float=0.99, dropout_rate: float=0.5, drop_connect_rate: float=0.2, **kwargs):
pass
| 2
| 1
| 49
| 1
| 48
| 0
| 1
| 1.24
| 1
| 4
| 0
| 0
| 1
| 22
| 1
| 1
| 121
| 9
| 50
| 49
| 24
| 62
| 26
| 25
| 24
| 1
| 1
| 0
| 1
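One derived attribute worth noting: the constructor sets `num_hidden_layers` to four times the total block repeats, so with the defaults:
```python
from transformers import EfficientNetConfig

config = EfficientNetConfig()
print(config.num_hidden_layers)  # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64
```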
|
2,282
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/configuration_efficientnet.py
|
transformers.models.efficientnet.configuration_efficientnet.EfficientNetOnnxConfig
|
from packaging import version
from collections.abc import Mapping
from ...onnx import OnnxConfig
from collections import OrderedDict
class EfficientNetOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.11')
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
@property
def atol_for_validation(self) -> float:
return 1e-05
|
class EfficientNetOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
| 5
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 4
| 0
| 0
| 2
| 0
| 2
| 2
| 14
| 2
| 12
| 6
| 7
| 0
| 6
| 4
| 3
| 1
| 1
| 0
| 2
|
2,283
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/image_processing_efficientnet.py
|
transformers.models.efficientnet.image_processing_efficientnet.EfficientNetImageProcessor
|
import numpy as np
import PIL
from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from typing import Optional, Union
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
logger = logging.get_logger(__name__)
class EfficientNetImageProcessor(BaseImageProcessor):
"""
Constructs an EfficientNet image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in `preprocess`.
size (`dict[str, int]`, *optional*, defaults to `{"height": 346, "width": 346}`):
Size of the image after `resize`. Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling` filter, *optional*, defaults to 0):
Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
do_center_crop (`bool`, *optional*, defaults to `False`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 289, "width": 289}`):
Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
rescale_offset (`bool`, *optional*, defaults to `False`):
Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range]. Can be
overridden by the `rescale_offset` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
include_top (`bool`, *optional*, defaults to `True`):
Whether to rescale the image again. Should be set to True if the inputs are used for image classification.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PIL.Image.NEAREST, do_center_crop: bool=False, crop_size: Optional[dict[str, int]]=None, rescale_factor: Union[int, float]=1 / 255, rescale_offset: bool=False, do_rescale: bool=True, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, include_top: bool=True, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 346, 'width': 346}
size = get_size_dict(size)
crop_size = crop_size if crop_size is not None else {'height': 289, 'width': 289}
crop_size = get_size_dict(crop_size, param_name='crop_size')
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.rescale_offset = rescale_offset
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.include_top = include_top
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.NEAREST, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.NEAREST`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.NEAREST`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if 'height' not in size or 'width' not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
output_size = (size['height'], size['width'])
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool=True, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs):
"""
Rescale an image by a scale factor.
If `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. If `scale` is
1/127.5, the image is rescaled between [-1, 1].
image = image * scale - 1
If `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1].
image = image * scale
Args:
image (`np.ndarray`):
Image to rescale.
scale (`int` or `float`):
Scale to apply to the image.
offset (`bool`, *optional*):
Whether to scale the image in both negative and positive directions.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
rescaled_image = rescale(image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs)
if offset:
rescaled_image = rescaled_image - 1
return rescaled_image
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, rescale_offset: Optional[bool]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, include_top: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after `resize`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image. Only has an effect if `do_resize` is set to
`True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
padded with zeros and then cropped.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
rescale_offset (`bool`, *optional*, defaults to `self.rescale_offset`):
Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range].
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
include_top (`bool`, *optional*, defaults to `self.include_top`):
Rescales the image again for image classification if set to True.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- `None`: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
rescale_offset = rescale_offset if rescale_offset is not None else self.rescale_offset
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
include_top = include_top if include_top is not None else self.include_top
size = size if size is not None else self.size
size = get_size_dict(size)
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name='crop_size')
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images]
if do_center_crop:
images = [self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images]
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor, offset=rescale_offset, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]
if include_top:
images = [self.normalize(image=image, mean=0, std=image_std, input_data_format=input_data_format) for image in images]
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data, tensor_type=return_tensors)
|
class EfficientNetImageProcessor(BaseImageProcessor):
'''
Constructs an EfficientNet image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in `preprocess`.
size (`dict[str, int]`, *optional*, defaults to `{"height": 346, "width": 346}`):
Size of the image after `resize`. Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling` filter, *optional*, defaults to 0):
Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
do_center_crop (`bool`, *optional*, defaults to `False`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 289, "width": 289}`):
Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
rescale_offset (`bool`, *optional*, defaults to `False`):
Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range]. Can be
overridden by the `rescale_offset` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
include_top (`bool`, *optional*, defaults to `True`):
Whether to rescale the image again. Should be set to True if the inputs are used for image classification.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PIL.Image.NEAREST, do_center_crop: bool=False, crop_size: Optional[dict[str, int]]=None, rescale_factor: Union[int, float]=1 / 255, rescale_offset: bool=False, do_rescale: bool=True, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, include_top: bool=True, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.NEAREST, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.NEAREST`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.NEAREST`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
'''
pass
def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool=True, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs):
'''
Rescale an image by a scale factor.
If `offset` is `True`, the image has its values rescaled by `scale` and then offset by 1. If `scale` is
1/127.5, the image is rescaled between [-1, 1].
image = image * scale - 1
If `offset` is `False`, and `scale` is 1/255, the image is rescaled between [0, 1].
image = image * scale
Args:
image (`np.ndarray`):
Image to rescale.
scale (`int` or `float`):
Scale to apply to the image.
offset (`bool`, *optional*):
Whether to scale the image in both negative and positive directions.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, rescale_offset: Optional[bool]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, include_top: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after `resize`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image. Only has an effect if `do_resize` is set to
`True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
padded with zeros and then cropped.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
rescale_offset (`bool`, *optional*, defaults to `self.rescale_offset`):
Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range].
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
include_top (`bool`, *optional*, defaults to `self.include_top`):
Rescales the image again for image classification if set to True.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- `None`: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| 6
| 4
| 69
| 5
| 40
| 24
| 8
| 0.81
| 1
| 8
| 2
| 0
| 4
| 12
| 4
| 24
| 321
| 27
| 162
| 71
| 107
| 132
| 68
| 21
| 63
| 21
| 3
| 1
| 30
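A minimal end-to-end usage sketch (assuming `transformers` is installed with vision extras); with the default `size` of 346x346 and `do_center_crop=False`, a random uint8 image comes out as a single 3x346x346 tensor:
```python
import numpy as np
from transformers import EfficientNetImageProcessor

processor = EfficientNetImageProcessor()
image = np.random.randint(0, 256, size=(500, 500, 3), dtype=np.uint8)

inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 346, 346])
```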
|
2,284
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/modeling_efficientnet.py
|
transformers.models.efficientnet.modeling_efficientnet.EfficientNetBlock
|
from torch import nn
import torch
from .configuration_efficientnet import EfficientNetConfig
class EfficientNetBlock(nn.Module):
"""
This corresponds to the expansion and depthwise convolution phase of each block in the original implementation.
Args:
config ([`EfficientNetConfig`]):
Model configuration class.
in_dim (`int`):
Number of input channels.
out_dim (`int`):
Number of output channels.
stride (`int`):
Stride size to be used in convolution layers.
expand_ratio (`int`):
Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
kernel_size (`int`):
Kernel size for the depthwise convolution layer.
drop_rate (`float`):
Dropout rate to be used in the final phase of each block.
id_skip (`bool`):
Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
of each block. Set to `True` for the first block of each stage.
adjust_padding (`bool`):
Whether to apply padding to only the right and bottom sides of the input kernel before the depthwise convolution
operation, set to `True` for inputs with odd input sizes.
"""
def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, expand_ratio: int, kernel_size: int, drop_rate: float, id_skip: bool, adjust_padding: bool):
super().__init__()
self.expand_ratio = expand_ratio
self.expand = self.expand_ratio != 1
expand_in_dim = in_dim * expand_ratio
if self.expand:
self.expansion = EfficientNetExpansionLayer(config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride)
self.depthwise_conv = EfficientNetDepthwiseLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, stride=stride, kernel_size=kernel_size, adjust_padding=adjust_padding)
self.squeeze_excite = EfficientNetSqueezeExciteLayer(config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand)
self.projection = EfficientNetFinalBlockLayer(config=config, in_dim=expand_in_dim if self.expand else in_dim, out_dim=out_dim, stride=stride, drop_rate=drop_rate, id_skip=id_skip)
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
embeddings = hidden_states
if self.expand_ratio != 1:
hidden_states = self.expansion(hidden_states)
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.squeeze_excite(hidden_states)
hidden_states = self.projection(embeddings, hidden_states)
return hidden_states
|
class EfficientNetBlock(nn.Module):
'''
This corresponds to the expansion and depthwise convolution phase of each block in the original implementation.
Args:
config ([`EfficientNetConfig`]):
Model configuration class.
in_dim (`int`):
Number of input channels.
out_dim (`int`):
Number of output channels.
stride (`int`):
Stride size to be used in convolution layers.
expand_ratio (`int`):
Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
kernel_size (`int`):
Kernel size for the depthwise convolution layer.
drop_rate (`float`):
Dropout rate to be used in the final phase of each block.
id_skip (`bool`):
Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
of each block. Set to `True` for the first block of each stage.
adjust_padding (`bool`):
Whether to apply padding to only the right and bottom sides of the input kernel before the depthwise convolution
operation, set to `True` for inputs with odd input sizes.
'''
def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, expand_ratio: int, kernel_size: int, drop_rate: float, id_skip: bool, adjust_padding: bool):
pass
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
pass
| 3
| 1
| 26
| 2
| 23
| 1
| 4
| 0.55
| 1
| 10
| 5
| 0
| 2
| 6
| 2
| 12
| 79
| 6
| 47
| 22
| 33
| 26
| 19
| 11
| 16
| 5
| 1
| 1
| 7
|
2,285
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/modeling_efficientnet.py
|
transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseConv2d
|
from torch import nn
class EfficientNetDepthwiseConv2d(nn.Conv2d):
def __init__(self, in_channels, depth_multiplier=1, kernel_size=3, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'):
out_channels = in_channels * depth_multiplier
super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels, bias=bias, padding_mode=padding_mode)
|
class EfficientNetDepthwiseConv2d(nn.Conv2d):
def __init__(self, in_channels, depth_multiplier=1, kernel_size=3, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'):
pass
| 2
| 0
| 23
| 0
| 23
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 24
| 0
| 24
| 13
| 12
| 0
| 4
| 3
| 2
| 1
| 1
| 0
| 1
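The whole class reduces to `groups=in_channels`: each input channel is convolved with its own `k x k` kernel (times `depth_multiplier`), so the parameter count grows linearly in channels rather than quadratically. A quick comparison:
```python
import torch
from torch import nn

depthwise = nn.Conv2d(32, 32, kernel_size=3, padding=1, groups=32, bias=False)
dense = nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False)

print(sum(p.numel() for p in depthwise.parameters()))  # 288  = 32 * 1 * 3 * 3
print(sum(p.numel() for p in dense.parameters()))      # 9216 = 32 * 32 * 3 * 3

x = torch.randn(1, 32, 56, 56)
print(depthwise(x).shape)  # torch.Size([1, 32, 56, 56])
```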
|
2,286
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/modeling_efficientnet.py
|
transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseLayer
|
from torch import nn
from .configuration_efficientnet import EfficientNetConfig
import torch
from ...activations import ACT2FN
class EfficientNetDepthwiseLayer(nn.Module):
"""
This corresponds to the depthwise convolution phase of each block in the original implementation.
"""
def __init__(self, config: EfficientNetConfig, in_dim: int, stride: int, kernel_size: int, adjust_padding: bool):
super().__init__()
self.stride = stride
conv_pad = 'valid' if self.stride == 2 else 'same'
padding = correct_pad(kernel_size, adjust=adjust_padding)
self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding)
self.depthwise_conv = EfficientNetDepthwiseConv2d(in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False)
self.depthwise_norm = nn.BatchNorm2d(num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
self.depthwise_act = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
if self.stride == 2:
hidden_states = self.depthwise_conv_pad(hidden_states)
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.depthwise_norm(hidden_states)
hidden_states = self.depthwise_act(hidden_states)
return hidden_states
|
class EfficientNetDepthwiseLayer(nn.Module):
'''
This corresponds to the depthwise convolution phase of each block in the original implementation.
'''
def __init__(self, config: EfficientNetConfig, in_dim: int, stride: int, kernel_size: int, adjust_padding: bool):
pass
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
pass
| 3
| 1
| 16
| 2
| 14
| 1
| 2
| 0.14
| 1
| 6
| 2
| 0
| 2
| 5
| 2
| 12
| 37
| 5
| 28
| 17
| 18
| 4
| 17
| 10
| 14
| 2
| 1
| 1
| 4
|
2,287
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/modeling_efficientnet.py
|
transformers.models.efficientnet.modeling_efficientnet.EfficientNetEmbeddings
|
import torch
from ...activations import ACT2FN
from torch import nn
from .configuration_efficientnet import EfficientNetConfig
class EfficientNetEmbeddings(nn.Module):
"""
A module that corresponds to the stem module of the original work.
"""
def __init__(self, config: EfficientNetConfig):
super().__init__()
self.out_dim = round_filters(config, 32)
self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1))
self.convolution = nn.Conv2d(config.num_channels, self.out_dim, kernel_size=3, stride=2, padding='valid', bias=False)
self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
self.activation = ACT2FN[config.hidden_act]
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
features = self.padding(pixel_values)
features = self.convolution(features)
features = self.batchnorm(features)
features = self.activation(features)
return features
|
class EfficientNetEmbeddings(nn.Module):
'''
A module that corresponds to the stem module of the original work.
'''
def __init__(self, config: EfficientNetConfig):
pass
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 9
| 1
| 8
| 0
| 1
| 0.19
| 1
| 3
| 1
| 0
| 2
| 5
| 2
| 12
| 23
| 4
| 16
| 9
| 13
| 3
| 14
| 9
| 11
| 1
| 1
| 0
| 2
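The asymmetric `ZeroPad2d((0, 1, 0, 1))` in the stem pads only the right and bottom edges so the stride-2 'valid' convolution halves the resolution exactly; a sketch with a hypothetical 224x224 input:
```python
import torch
from torch import nn

padding = nn.ZeroPad2d(padding=(0, 1, 0, 1))  # 224 -> 225 on each spatial dim
convolution = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding="valid", bias=False)

pixel_values = torch.randn(1, 3, 224, 224)
features = convolution(padding(pixel_values))
print(features.shape)  # torch.Size([1, 32, 112, 112]); (225 - 3) // 2 + 1 == 112
```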
|
2,288
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/modeling_efficientnet.py
|
transformers.models.efficientnet.modeling_efficientnet.EfficientNetEncoder
|
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
import torch
from .configuration_efficientnet import EfficientNetConfig
from typing import Optional, Union
import math
from torch import nn
class EfficientNetEncoder(nn.Module):
"""
Forward propagates the embeddings through each EfficientNet block.
Args:
config ([`EfficientNetConfig`]):
Model configuration class.
"""
def __init__(self, config: EfficientNetConfig):
super().__init__()
self.config = config
self.depth_coefficient = config.depth_coefficient
def round_repeats(repeats):
return int(math.ceil(self.depth_coefficient * repeats))
num_base_blocks = len(config.in_channels)
num_blocks = sum((round_repeats(n) for n in config.num_block_repeats))
curr_block_num = 0
blocks = []
for i in range(num_base_blocks):
in_dim = round_filters(config, config.in_channels[i])
out_dim = round_filters(config, config.out_channels[i])
stride = config.strides[i]
kernel_size = config.kernel_sizes[i]
expand_ratio = config.expand_ratios[i]
for j in range(round_repeats(config.num_block_repeats[i])):
id_skip = j == 0
stride = 1 if j > 0 else stride
in_dim = out_dim if j > 0 else in_dim
adjust_padding = curr_block_num not in config.depthwise_padding
drop_rate = config.drop_connect_rate * curr_block_num / num_blocks
block = EfficientNetBlock(config=config, in_dim=in_dim, out_dim=out_dim, stride=stride, kernel_size=kernel_size, expand_ratio=expand_ratio, drop_rate=drop_rate, id_skip=id_skip, adjust_padding=adjust_padding)
blocks.append(block)
curr_block_num += 1
self.blocks = nn.ModuleList(blocks)
self.top_conv = nn.Conv2d(in_channels=out_dim, out_channels=round_filters(config, 1280), kernel_size=1, padding='same', bias=False)
self.top_bn = nn.BatchNorm2d(num_features=config.hidden_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
self.top_activation = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> BaseModelOutputWithNoAttention:
all_hidden_states = (hidden_states,) if output_hidden_states else None
for block in self.blocks:
hidden_states = block(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
hidden_states = self.top_conv(hidden_states)
hidden_states = self.top_bn(hidden_states)
hidden_states = self.top_activation(hidden_states)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states] if v is not None))
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
|
class EfficientNetEncoder(nn.Module):
'''
Forward propagates the embeddings through each EfficientNet block.
Args:
config ([`EfficientNetConfig`]):
Model configuration class.
'''
def __init__(self, config: EfficientNetConfig):
pass
def round_repeats(repeats):
pass
def forward(self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> BaseModelOutputWithNoAttention:
pass
| 4 | 1 | 27 | 3 | 23 | 1 | 4 | 0.1 | 1 | 8 | 3 | 0 | 2 | 6 | 2 | 12 | 88 | 13 | 68 | 32 | 59 | 7 | 42 | 27 | 38 | 7 | 1 | 2 | 13 |
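The encoder above scales each stage's repeat count with `round_repeats`, rounding `depth_coefficient * repeats` up so deeper model variants only ever add blocks. A small worked example with hypothetical coefficients:

```python
import math

# Same arithmetic as the nested round_repeats in EfficientNetEncoder.__init__.
def round_repeats(repeats, depth_coefficient):
    return int(math.ceil(depth_coefficient * repeats))

# A stage with 3 base repeats under a few depth coefficients.
for coeff in (1.0, 1.4, 1.8):
    print(coeff, round_repeats(3, coeff))  # -> 3, 5 (ceil(4.2)), 6 (ceil(5.4))
```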
2,289 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/modeling_efficientnet.py | transformers.models.efficientnet.modeling_efficientnet.EfficientNetExpansionLayer |
from ...activations import ACT2FN
from torch import nn
import torch
from .configuration_efficientnet import EfficientNetConfig
class EfficientNetExpansionLayer(nn.Module):
"""
This corresponds to the expansion phase of each block in the original implementation.
"""
def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int):
super().__init__()
self.expand_conv = nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding='same', bias=False)
self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps)
self.expand_act = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
hidden_states = self.expand_conv(hidden_states)
hidden_states = self.expand_bn(hidden_states)
hidden_states = self.expand_act(hidden_states)
return hidden_states
|
class EfficientNetExpansionLayer(nn.Module):
'''
This corresponds to the expansion phase of each block in the original implementation.
'''
def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int):
pass
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
pass
| 3 | 1 | 9 | 1 | 8 | 1 | 1 | 0.24 | 1 | 4 | 1 | 0 | 2 | 3 | 2 | 12 | 24 | 3 | 17 | 6 | 14 | 4 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
2,290 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/modeling_efficientnet.py | transformers.models.efficientnet.modeling_efficientnet.EfficientNetFinalBlockLayer |
from .configuration_efficientnet import EfficientNetConfig
from torch import nn
import torch
class EfficientNetFinalBlockLayer(nn.Module):
"""
This corresponds to the final phase of each block in the original implementation.
"""
def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool):
super().__init__()
self.apply_dropout = stride == 1 and (not id_skip)
self.project_conv = nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding='same', bias=False)
self.project_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
self.dropout = nn.Dropout(p=drop_rate)
def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor:
hidden_states = self.project_conv(hidden_states)
hidden_states = self.project_bn(hidden_states)
if self.apply_dropout:
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + embeddings
return hidden_states
|
class EfficientNetFinalBlockLayer(nn.Module):
'''
This corresponds to the final phase of each block in the original implementation.
'''
def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool):
pass
def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor:
pass
| 3 | 1 | 13 | 1 | 12 | 0 | 2 | 0.13 | 1 | 6 | 1 | 0 | 2 | 4 | 2 | 12 | 31 | 4 | 24 | 9 | 19 | 3 | 14 | 7 | 11 | 2 | 1 | 1 | 3 |
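In the final block layer above, the dropout (and with it the residual add, which is gated by the same flag in the upstream source) applies only for stride-1 blocks that are not the first repeat of a stage (`id_skip` marks the first repeat). A quick truth-table sketch of the condition:

```python
# Which (stride, id_skip) combinations enable dropout + skip connection
# in EfficientNetFinalBlockLayer; only stride-1, non-first blocks qualify.
for stride, id_skip in [(1, True), (1, False), (2, False)]:
    apply_dropout = stride == 1 and (not id_skip)
    print(stride, id_skip, apply_dropout)  # -> False, True, False
```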
2,291 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/modeling_efficientnet.py | transformers.models.efficientnet.modeling_efficientnet.EfficientNetForImageClassification |
from torch import nn
import torch
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from typing import Optional, Union
@auto_docstring(custom_intro='\n EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g.\n for ImageNet.\n ')
class EfficientNetForImageClassification(EfficientNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.efficientnet = EfficientNetModel(config)
self.dropout = nn.Dropout(p=config.dropout_rate)
self.classifier = nn.Linear(config.hidden_dim, self.num_labels) if self.num_labels > 0 else nn.Identity()
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.efficientnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
|
@auto_docstring(custom_intro='\n EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g.\n for ImageNet.\n ')
class EfficientNetForImageClassification(EfficientNetPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5 | 1 | 32 | 4 | 25 | 4 | 8 | 0.14 | 1 | 5 | 2 | 0 | 2 | 5 | 2 | 3 | 73 | 8 | 57 | 21 | 41 | 8 | 35 | 14 | 32 | 13 | 2 | 3 | 15 |
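A hedged end-to-end usage sketch for the classification model above. `google/efficientnet-b0` is an assumed Hub checkpoint id, and the random tensor stands in for real processor output:

```python
import torch
from transformers import AutoImageProcessor, EfficientNetForImageClassification

# In practice pixel_values would come from processor(images=...).pixel_values.
processor = AutoImageProcessor.from_pretrained("google/efficientnet-b0")
model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b0")

pixel_values = torch.randn(1, 3, 224, 224)  # stand-in for a preprocessed image
with torch.no_grad():
    logits = model(pixel_values=pixel_values).logits
print(model.config.id2label[logits.argmax(-1).item()])
```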
2,292 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/modeling_efficientnet.py | transformers.models.efficientnet.modeling_efficientnet.EfficientNetModel |
from .configuration_efficientnet import EfficientNetConfig
from torch import nn
from ...utils import auto_docstring, logging
from typing import Optional, Union
import torch
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
@auto_docstring
class EfficientNetModel(EfficientNetPreTrainedModel):
def __init__(self, config: EfficientNetConfig):
super().__init__(config)
self.config = config
self.embeddings = EfficientNetEmbeddings(config)
self.encoder = EfficientNetEncoder(config)
if config.pooling_type == 'mean':
self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True)
elif config.pooling_type == 'max':
self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True)
else:
raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling}")
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
embedding_output = self.embeddings(pixel_values)
encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = encoder_outputs[0]
pooled_output = self.pooler(last_hidden_state)
pooled_output = pooled_output.reshape(pooled_output.shape[:2])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
|
@auto_docstring
class EfficientNetModel(EfficientNetPreTrainedModel):
def __init__(self, config: EfficientNetConfig):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
pass
| 5 | 0 | 26 | 4 | 20 | 2 | 4 | 0.08 | 1 | 7 | 4 | 0 | 2 | 4 | 2 | 3 | 61 | 8 | 49 | 17 | 33 | 4 | 24 | 11 | 21 | 5 | 2 | 1 | 8 |
2,293 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/modeling_efficientnet.py | transformers.models.efficientnet.modeling_efficientnet.EfficientNetPreTrainedModel |
from ...modeling_utils import PreTrainedModel
from torch import nn
from .configuration_efficientnet import EfficientNetConfig
from ...utils import auto_docstring, logging
@auto_docstring
class EfficientNetPreTrainedModel(PreTrainedModel):
config: EfficientNetConfig
base_model_prefix = 'efficientnet'
main_input_name = 'pixel_values'
_no_split_modules = []
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
|
@auto_docstring
class EfficientNetPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
'''Initialize the weights'''
pass
| 3 | 1 | 11 | 0 | 8 | 3 | 4 | 0.54 | 1 | 0 | 0 | 2 | 1 | 0 | 1 | 1 | 22 | 2 | 13 | 6 | 11 | 7 | 12 | 6 | 10 | 4 | 1 | 2 | 4 |
2,294 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/efficientnet/modeling_efficientnet.py | transformers.models.efficientnet.modeling_efficientnet.EfficientNetSqueezeExciteLayer |
from ...activations import ACT2FN
from torch import nn
import torch
from .configuration_efficientnet import EfficientNetConfig
class EfficientNetSqueezeExciteLayer(nn.Module):
"""
This corresponds to the Squeeze-and-Excitation phase of each block in the original implementation.
"""
def __init__(self, config: EfficientNetConfig, in_dim: int, expand_dim: int, expand: bool=False):
super().__init__()
self.dim = expand_dim if expand else in_dim
self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio))
self.squeeze = nn.AdaptiveAvgPool2d(output_size=1)
self.reduce = nn.Conv2d(in_channels=self.dim, out_channels=self.dim_se, kernel_size=1, padding='same')
self.expand = nn.Conv2d(in_channels=self.dim_se, out_channels=self.dim, kernel_size=1, padding='same')
self.act_reduce = ACT2FN[config.hidden_act]
self.act_expand = nn.Sigmoid()
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
inputs = hidden_states
hidden_states = self.squeeze(hidden_states)
hidden_states = self.reduce(hidden_states)
hidden_states = self.act_reduce(hidden_states)
hidden_states = self.expand(hidden_states)
hidden_states = self.act_expand(hidden_states)
hidden_states = torch.mul(inputs, hidden_states)
return hidden_states
|
class EfficientNetSqueezeExciteLayer(nn.Module):
'''
This corresponds to the Squeeze-and-Excitation phase of each block in the original implementation.
'''
def __init__(self, config: EfficientNetConfig, in_dim: int, expand_dim: int, expand: bool=False):
pass
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
pass
| 3 | 1 | 16 | 2 | 14 | 0 | 2 | 0.1 | 1 | 5 | 1 | 0 | 2 | 7 | 2 | 12 | 37 | 5 | 29 | 11 | 26 | 3 | 19 | 11 | 16 | 2 | 1 | 0 | 3 |
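A shape walkthrough for the squeeze-excite layer above, with hypothetical dims (`in_dim=24`, `squeeze_expansion_ratio=0.25`, so `dim_se=6`); `relu` stands in for the configured `ACT2FN[config.hidden_act]`:

```python
import torch
from torch import nn

dim, dim_se = 24, 6  # hypothetical channel sizes
squeeze = nn.AdaptiveAvgPool2d(output_size=1)
reduce = nn.Conv2d(dim, dim_se, kernel_size=1, padding="same")
expand = nn.Conv2d(dim_se, dim, kernel_size=1, padding="same")

x = torch.randn(1, dim, 56, 56)
gates = torch.sigmoid(expand(torch.relu(reduce(squeeze(x)))))  # (1, 24, 1, 1)
print(torch.mul(x, gates).shape)  # broadcast multiply -> (1, 24, 56, 56)
```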
2,295 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/configuration_electra.py | transformers.models.electra.configuration_electra.ElectraConfig |
from ...configuration_utils import PretrainedConfig
class ElectraConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`ElectraModel`] or a [`TFElectraModel`]. It is
used to instantiate an ELECTRA model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA
[google/electra-small-discriminator](https://huggingface.co/google/electra-small-discriminator) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`].
embedding_size (`int`, *optional*, defaults to 128):
Dimensionality of the encoder layers and the pooler layer.
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 1024):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
summary_type (`str`, *optional*, defaults to `"first"`):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Has to be one of the following options:
- `"last"`: Take the last token hidden state (like XLNet).
- `"first"`: Take the first token hidden state (like BERT).
- `"mean"`: Take the mean of all tokens hidden states.
- `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- `"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (`bool`, *optional*, defaults to `True`):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Whether or not to add a projection after the vector extraction.
summary_activation (`str`, *optional*):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Pass `"gelu"` for a gelu activation to the output, any other value will result in no activation.
summary_last_dropout (`float`, *optional*, defaults to 0.1):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
The dropout ratio to be used after the projection and activation.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Examples:
```python
>>> from transformers import ElectraConfig, ElectraModel
>>> # Initializing an ELECTRA electra-base-uncased style configuration
>>> configuration = ElectraConfig()
>>> # Initializing a model (with random weights) from the electra-base-uncased style configuration
>>> model = ElectraModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'electra'
def __init__(self, vocab_size=30522, embedding_size=128, hidden_size=256, num_hidden_layers=12, num_attention_heads=4, intermediate_size=1024, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, summary_type='first', summary_use_proj=True, summary_activation='gelu', summary_last_dropout=0.1, pad_token_id=0, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_last_dropout = summary_last_dropout
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
|
class ElectraConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`ElectraModel`] or a [`TFElectraModel`]. It is
used to instantiate an ELECTRA model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA
[google/electra-small-discriminator](https://huggingface.co/google/electra-small-discriminator) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`].
embedding_size (`int`, *optional*, defaults to 128):
Dimensionality of the encoder layers and the pooler layer.
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 1024):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
summary_type (`str`, *optional*, defaults to `"first"`):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Has to be one of the following options:
- `"last"`: Take the last token hidden state (like XLNet).
- `"first"`: Take the first token hidden state (like BERT).
- `"mean"`: Take the mean of all tokens hidden states.
- `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
- `"attn"`: Not implemented now, use multi-head attention.
summary_use_proj (`bool`, *optional*, defaults to `True`):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Whether or not to add a projection after the vector extraction.
summary_activation (`str`, *optional*):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
Pass `"gelu"` for a gelu activation to the output, any other value will result in no activation.
summary_last_dropout (`float`, *optional*, defaults to 0.1):
Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
The dropout ratio to be used after the projection and activation.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Examples:
```python
>>> from transformers import ElectraConfig, ElectraModel
>>> # Initializing an ELECTRA electra-base-uncased style configuration
>>> configuration = ElectraConfig()
>>> # Initializing a model (with random weights) from the electra-base-uncased style configuration
>>> model = ElectraModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=30522, embedding_size=128, hidden_size=256, num_hidden_layers=12, num_attention_heads=4, intermediate_size=1024, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, summary_type='first', summary_use_proj=True, summary_activation='gelu', summary_last_dropout=0.1, pad_token_id=0, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
pass
| 2 | 1 | 48 | 2 | 46 | 0 | 1 | 1.56 | 1 | 1 | 0 | 0 | 1 | 20 | 1 | 1 | 140 | 17 | 48 | 47 | 22 | 75 | 24 | 23 | 22 | 1 | 1 | 0 | 1 |
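Beyond the docstring's own example above, a short sketch of overriding a few defaults visible in the signature (the chosen values are arbitrary):

```python
from transformers import ElectraConfig

# Override a couple of signature defaults; everything else keeps the
# small-discriminator-style values shown above.
config = ElectraConfig(hidden_size=512, num_attention_heads=8, intermediate_size=2048)
print(config.hidden_size, config.num_hidden_layers)  # -> 512 12
```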
2,296 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/configuration_electra.py | transformers.models.electra.configuration_electra.ElectraOnnxConfig |
from ...onnx import OnnxConfig
from collections.abc import Mapping
from collections import OrderedDict
class ElectraOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'multiple-choice':
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
|
class ElectraOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
| 3 | 0 | 12 | 0 | 12 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 1 | 0 | 1 | 1 | 14 | 0 | 14 | 4 | 11 | 0 | 6 | 3 | 4 | 2 | 1 | 1 | 2 |
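For reference, this is the dynamic-axes mapping the `inputs` property above produces for tasks other than multiple-choice (a sketch of the returned structure, not an ONNX export run):

```python
from collections import OrderedDict

# Default-task axes: batch (dim 0) and sequence (dim 1) are dynamic;
# the multiple-choice branch inserts a "choice" axis at position 1 instead.
dynamic_axis = {0: "batch", 1: "sequence"}
inputs = OrderedDict(
    [
        ("input_ids", dynamic_axis),
        ("attention_mask", dynamic_axis),
        ("token_type_ids", dynamic_axis),
    ]
)
print(inputs)
```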
2,297 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py | transformers.models.electra.modeling_electra.ElectraAttention |
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from typing import Callable, Optional, Union
from torch import nn
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...processing_utils import Unpack
from ...cache_utils import Cache, EncoderDecoderCache
import torch
class ElectraAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__()
self.is_cross_attention = is_cross_attention
attention_class = ElectraCrossAttention if is_cross_attention else ElectraSelfAttention
self.self = attention_class(config, position_embedding_type=position_embedding_type, is_causal=is_causal, layer_idx=layer_idx)
self.output = ElectraSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, head_mask=head_mask, past_key_value=past_key_value, cache_position=cache_position, **kwargs)
attention_output = self.output(attention_output, hidden_states)
return (attention_output, attn_weights)
|
class ElectraAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None, is_cross_attention=False):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| 4 | 0 | 15 | 1 | 14 | 1 | 1 | 0.07 | 1 | 5 | 1 | 0 | 3 | 3 | 3 | 13 | 49 | 4 | 43 | 20 | 30 | 3 | 22 | 11 | 18 | 2 | 1 | 1 | 4 |
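The bookkeeping in `prune_heads` above shrinks the head count and `all_head_size` together; a worked example with hypothetical sizes:

```python
# Pruning 2 of 4 heads (head size 64) leaves 2 heads and all_head_size 128;
# the query/key/value/output linears are sliced along the matching index
# with prune_linear_layer in the method above.
num_attention_heads, attention_head_size = 4, 64
heads_to_prune = {0, 2}
num_attention_heads = num_attention_heads - len(heads_to_prune)
all_head_size = attention_head_size * num_attention_heads
print(num_attention_heads, all_head_size)  # -> 2 128
```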
2,298 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py | transformers.models.electra.modeling_electra.ElectraClassificationHead |
from ...activations import ACT2FN, get_activation
from torch import nn
class ElectraClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.activation = get_activation('gelu')
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = self.activation(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
|
class ElectraClassificationHead(nn.Module):
'''Head for sentence-level classification tasks.'''
def __init__(self, config):
pass
def forward(self, features, **kwargs):
pass
| 3 | 1 | 9 | 0 | 9 | 1 | 2 | 0.17 | 1 | 1 | 0 | 0 | 2 | 4 | 2 | 12 | 21 | 2 | 18 | 9 | 15 | 3 | 16 | 9 | 13 | 2 | 1 | 0 | 3 |
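The head above summarizes a sequence by its first token (`features[:, 0, :]`, the [CLS]-style position). A quick shape check with hypothetical sizes:

```python
import torch

# batch=2, seq_len=16, hidden=256 are hypothetical; the slice keeps only
# the first token's hidden state as the sentence representation.
features = torch.randn(2, 16, 256)
x = features[:, 0, :]
print(x.shape)  # -> torch.Size([2, 256])
```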
2,299 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/electra/modeling_electra.py | transformers.models.electra.modeling_electra.ElectraDiscriminatorPredictions |
from ...activations import ACT2FN, get_activation
from torch import nn
class ElectraDiscriminatorPredictions(nn.Module):
"""Prediction module for the discriminator, made up of two dense layers."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = get_activation(config.hidden_act)
self.dense_prediction = nn.Linear(config.hidden_size, 1)
self.config = config
def forward(self, discriminator_hidden_states):
hidden_states = self.dense(discriminator_hidden_states)
hidden_states = self.activation(hidden_states)
logits = self.dense_prediction(hidden_states).squeeze(-1)
return logits
|
class ElectraDiscriminatorPredictions(nn.Module):
'''Prediction module for the discriminator, made up of two dense layers.'''
def __init__(self, config):
pass
def forward(self, discriminator_hidden_states):
pass
| 3 | 1 | 7 | 1 | 6 | 0 | 1 | 0.08 | 1 | 1 | 0 | 0 | 2 | 4 | 2 | 12 | 17 | 4 | 12 | 9 | 9 | 1 | 12 | 9 | 9 | 1 | 1 | 0 | 2 |
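Finally, the discriminator head above emits one replaced-token logit per position. A self-contained shape sketch with hypothetical sizes, using `gelu` as a stand-in for `get_activation(config.hidden_act)`:

```python
import torch
from torch import nn

hidden_size, batch, seq_len = 256, 2, 16  # hypothetical sizes
dense = nn.Linear(hidden_size, hidden_size)
dense_prediction = nn.Linear(hidden_size, 1)

hidden_states = torch.randn(batch, seq_len, hidden_size)
logits = dense_prediction(nn.functional.gelu(dense(hidden_states))).squeeze(-1)
print(logits.shape)  # -> torch.Size([2, 16]): one logit per token
```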