text: string, lengths 1 to 1.02k
class_index: int64, 0 to 10.8k
source: string, lengths 85 to 188
self.feature_layer_norm_eps = feature_layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size
2,858
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sew_d/configuration_sew_d.py
if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. " "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, " f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) " f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`." )
2,858
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sew_d/configuration_sew_d.py
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # sequence classification self.use_weighted_layer_sum = use_weighted_layer_sum self.classifier_proj_size = classifier_proj_size @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1)
2,858
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sew_d/configuration_sew_d.py
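The `inputs_to_logits_ratio` property in the snippet above is simply the product of the configured convolution strides, i.e. how many raw input samples map to one output frame. A minimal sketch of that computation, using assumed example stride values (not necessarily the model's actual defaults):

```python
import functools
import operator

# hypothetical stride tuple, chosen only for illustration
conv_stride = (5, 2, 2, 2, 2, 2, 2)

# same reduction as the `inputs_to_logits_ratio` property above
inputs_to_logits_ratio = functools.reduce(operator.mul, conv_stride, 1)
print(inputs_to_logits_ratio)  # 320 -> one logit per 320 input samples
```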
def to_dict(self): """ Serializes this instance to a Python dictionary. """ output = super().to_dict() output["hidden_dropout"] = output.pop("_hidden_dropout") return output
2,858
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/sew_d/configuration_sew_d.py
class TableTransformerDecoderOutput(BaseModelOutputWithCrossAttentions): """ Base class for outputs of the TABLE_TRANSFORMER decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of which has gone through a layernorm. This is useful when training the model with auxiliary decoding losses.
2,859
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
2,859
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of which has gone through a layernorm. """
2,859
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
intermediate_hidden_states: Optional[torch.FloatTensor] = None
2,859
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
class TableTransformerModelOutput(Seq2SeqModelOutput): """ Base class for outputs of the TABLE_TRANSFORMER encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of which has gone through a layernorm. This is useful when training the model with auxiliary decoding losses.
2,860
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
2,860
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
sequence_length)`. Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model.
2,860
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
2,860
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of which has gone through a layernorm. """
2,860
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
intermediate_hidden_states: Optional[torch.FloatTensor] = None
2,860
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
class TableTransformerObjectDetectionOutput(ModelOutput): """ Output type of [`TableTransformerForObjectDetection`].
2,861
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided): Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
2,861
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
possible padding). You can use [`~TableTransformerImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
2,861
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax,
2,861
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
2,861
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """
2,861
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
2,861
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
class TableTransformerFrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which models other than torchvision.models.resnet[18,34,50,101] produce NaNs. """ def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key]
2,862
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): # move reshapes to the beginning # to make it user-friendly weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias
2,862
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
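As a quick sanity check (not part of the source), the frozen formulation in the `forward` above reproduces `nn.BatchNorm2d` in eval mode when it uses the same buffers and the same `eps = 1e-5`:

```python
import torch
from torch import nn

torch.manual_seed(0)
x = torch.randn(2, 8, 4, 4)

bn = nn.BatchNorm2d(8).eval()  # running_mean=0, running_var=1, weight=1, bias=0 by default
eps = 1e-5

# same algebra as TableTransformerFrozenBatchNorm2d.forward, using the BatchNorm buffers
weight = bn.weight.reshape(1, -1, 1, 1)
bias = bn.bias.reshape(1, -1, 1, 1)
running_var = bn.running_var.reshape(1, -1, 1, 1)
running_mean = bn.running_mean.reshape(1, -1, 1, 1)
scale = weight * (running_var + eps).rsqrt()
frozen_out = x * scale + (bias - running_mean * scale)

print(torch.allclose(frozen_out, bn(x), atol=1e-6))  # True
```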
class TableTransformerConvEncoder(nn.Module): """ Convolutional backbone, using either the AutoBackbone API or one from the timm library. nn.BatchNorm2d layers are replaced by TableTransformerFrozenBatchNorm2d as defined above. """ def __init__(self, config): super().__init__() self.config = config
2,863
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
# For backwards compatibility we have to use the timm library directly instead of the AutoBackbone API if config.use_timm_backbone: # We default to values which were previously hard-coded. This enables configurability from the config # using backbone arguments, while keeping the default behavior the same. requires_backends(self, ["timm"]) kwargs = getattr(config, "backbone_kwargs", {}) kwargs = {} if kwargs is None else kwargs.copy() out_indices = kwargs.pop("out_indices", (1, 2, 3, 4)) num_channels = kwargs.pop("in_chans", config.num_channels) if config.dilation: kwargs["output_stride"] = kwargs.get("output_stride", 16) backbone = create_model( config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=out_indices, in_chans=num_channels,
2,863
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
**kwargs, ) else: backbone = load_backbone(config)
2,863
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
# replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = ( self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels ) backbone_model_type = None if config.backbone is not None: backbone_model_type = config.backbone elif config.backbone_config is not None: backbone_model_type = config.backbone_config.model_type else: raise ValueError("Either `backbone` or `backbone_config` should be provided in the config")
2,863
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): if config.use_timm_backbone: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: parameter.requires_grad_(False) def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps
2,863
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
out = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out
2,863
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
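The mask handling in the `forward` above relies on nearest-neighbour interpolation to shrink the boolean `pixel_mask` to each feature map's resolution. A small standalone sketch with assumed (hypothetical) sizes:

```python
import torch
from torch import nn

# assumed sizes: a batch of 2 images padded to 64x64, one backbone stage with stride 8
pixel_mask = torch.zeros(2, 64, 64, dtype=torch.bool)
pixel_mask[0, :, :48] = True  # first image is only 48 pixels wide before padding
pixel_mask[1, :32, :] = True  # second image is only 32 pixels tall before padding
feature_map = torch.randn(2, 256, 8, 8)

# same downsampling as in TableTransformerConvEncoder.forward
mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
print(mask.shape)  # torch.Size([2, 8, 8]) -- one boolean per feature-map location
```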
class TableTransformerConvModel(nn.Module): """ This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. """ def __init__(self, conv_encoder, position_embedding): super().__init__() self.conv_encoder = conv_encoder self.position_embedding = position_embedding def forward(self, pixel_values, pixel_mask): # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples out = self.conv_encoder(pixel_values, pixel_mask) pos = [] for feature_map, mask in out: # position encoding pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) return out, pos
2,864
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
class TableTransformerSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. """ def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): super().__init__() self.embedding_dim = embedding_dim self.temperature = temperature self.normalize = normalize if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") if scale is None: scale = 2 * math.pi self.scale = scale
2,865
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError("No pixel mask provided") y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) if self.normalize: y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float() dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.embedding_dim)
2,865
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos
2,865
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
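A condensed, self-contained sketch of the sine embedding above, using toy sizes that are assumptions (1 image, a 4x6 feature map, `embedding_dim=64` per axis). It mainly shows that the y- and x-embeddings are concatenated, so the output has `2 * embedding_dim` channels:

```python
import math
import torch

embedding_dim, temperature, scale = 64, 10000, 2 * math.pi
pixel_mask = torch.ones(1, 4, 6)

y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * scale  # normalize=True branch
x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * scale

dim_t = torch.arange(embedding_dim).float()
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / embedding_dim)

pos_x = x_embed[..., None] / dim_t
pos_y = y_embed[..., None] / dim_t
pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)

print(pos.shape)  # torch.Size([1, 128, 4, 6]) -- 2 * embedding_dim channels per position
```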
class TableTransformerLearnedPositionEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, embedding_dim=256): super().__init__() self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): height, width = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos
2,866
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
class TableTransformerAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the TABLE_TRANSFORMER paper). """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) self.scaling = self.head_dim**-0.5
2,867
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor]): return tensor if object_queries is None else tensor + object_queries
2,867
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
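To make the `_shape` helper and the later `view(*proj_shape)` concrete, here is a small shape walk-through with assumed toy dimensions (batch 2, sequence length 5, `embed_dim=256`, 8 heads):

```python
import torch

batch_size, seq_len, embed_dim, num_heads = 2, 5, 256, 8
head_dim = embed_dim // num_heads  # 32

states = torch.randn(batch_size, seq_len, embed_dim)

# _shape: (batch, seq, embed) -> (batch, heads, seq, head_dim)
shaped = states.view(batch_size, seq_len, num_heads, head_dim).transpose(1, 2).contiguous()
print(shaped.shape)  # torch.Size([2, 8, 5, 32])

# the forward pass then folds batch and heads together for a single batched matmul
folded = shaped.view(batch_size * num_heads, -1, head_dim)
attn_weights = torch.bmm(folded, folded.transpose(1, 2))
print(attn_weights.shape)  # torch.Size([16, 5, 5])
```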
def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, object_queries: Optional[torch.Tensor] = None, key_value_states: Optional[torch.Tensor] = None, spatial_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if object_queries is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, object_queries)
2,867
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
# add key-value position embeddings to the key value states if spatial_position_embeddings is not None: key_value_states_original = key_value_states key_value_states = self.with_pos_embed(key_value_states, spatial_position_embeddings) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
2,867
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" )
2,867
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1)
2,867
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights has to be reshaped # twice and reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size * self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" )
2,867
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped
2,867
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
class TableTransformerEncoderLayer(nn.Module): # Copied from transformers.models.detr.modeling_detr.DetrEncoderLayer.__init__ with Detr->TableTransformer def __init__(self, config: TableTransformerConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = TableTransformerAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim)
2,868
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: torch.Tensor = None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. object_queries (`torch.FloatTensor`, *optional*): object queries, to be added to hidden_states. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states)
2,868
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states
2,868
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs
2,868
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
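The clamping at the end of the encoder layer guards against fp16 overflow. A tiny sketch (the inf values are artificial) of what that branch does:

```python
import torch

hidden_states = torch.tensor([1.0, float("inf"), float("-inf")], dtype=torch.float16)

if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
    clamp_value = torch.finfo(hidden_states.dtype).max - 1000  # 65504 - 1000
    hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

print(hidden_states)  # roughly [1.0, 6.45e4, -6.45e4] -- overflowed values become large but finite
```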
class TableTransformerDecoderLayer(nn.Module): # Copied from transformers.models.detr.modeling_detr.DetrDecoderLayer.__init__ with Detr->TableTransformer def __init__(self, config: TableTransformerConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = TableTransformerAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout
2,869
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = TableTransformerAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim)
2,869
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, object_queries: Optional[torch.Tensor] = None, query_position_embeddings: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. object_queries (`torch.FloatTensor`, *optional*): object queries that are added to the queries and keys in the cross-attention layer. query_position_embeddings (`torch.FloatTensor`, *optional*):
2,869
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
object queries that are added to the queries and keys in the self-attention layer. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states)
2,869
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
# Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, object_queries=query_position_embeddings, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states)
2,869
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
# Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, object_queries=query_position_embeddings, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, spatial_position_embeddings=object_queries, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states)
2,869
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
# Fully Connected hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs
2,869
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
class TableTransformerPreTrainedModel(PreTrainedModel): config_class = TableTransformerConfig base_model_prefix = "model" main_input_name = "pixel_values" _no_split_modules = [ r"TableTransformerConvEncoder", r"TableTransformerEncoderLayer", r"TableTransformerDecoderLayer", ] def _init_weights(self, module): std = self.config.init_std
2,870
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
if isinstance(module, TableTransformerLearnedPositionEmbedding): nn.init.uniform_(module.row_embeddings.weight) nn.init.uniform_(module.column_embeddings.weight) if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_()
2,870
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
class TableTransformerEncoder(TableTransformerPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`TableTransformerEncoderLayer`]. The encoder updates the flattened feature map through multiple self-attention layers. Small tweak for Table Transformer: - object_queries are added to the forward pass. Args: config: TableTransformerConfig """ def __init__(self, config: TableTransformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.layers = nn.ModuleList([TableTransformerEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm = nn.LayerNorm(config.d_model) # Initialize weights and apply final processing self.post_init()
2,871
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
def forward( self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask)
2,871
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Position embeddings that are added to the queries and keys in each self-attention layer.
2,871
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict
2,871
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for encoder_layer in self.layers: if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True
2,871
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
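The encoder (and the decoder further below) uses LayerDrop: during training each layer is skipped independently with probability `layerdrop`. A minimal sketch of that sampling, with an assumed rate of 0.1:

```python
import torch

layerdrop, training = 0.1, True  # assumed example rate
skipped = 0
for _ in range(10_000):
    if training and torch.rand([]) < layerdrop:
        skipped += 1  # the layer's forward pass would be skipped for this batch
print(skipped)  # roughly 1_000, i.e. about 10% of passes skip the layer
```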
if to_drop: layer_outputs = (None, None) else: # we add object_queries as extra input to the encoder_layer layer_outputs = encoder_layer( hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) hidden_states = self.layernorm(hidden_states) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions )
2,871
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
class TableTransformerDecoder(TableTransformerPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TableTransformerDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some small tweaks for TABLE_TRANSFORMER: - object_queries and query_position_embeddings are added to the forward pass. - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers. Args: config: TableTransformerConfig """ def __init__(self, config: TableTransformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop
2,872
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
self.layers = nn.ModuleList([TableTransformerDecoderLayer(config) for _ in range(config.decoder_layers)]) # in TABLE_TRANSFORMER, the decoder uses layernorm after the last decoder layer output self.layernorm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): The query embeddings that are passed into the decoder.
2,872
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`: - 1 for queries that are **not masked**, - 0 for queries that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`:
2,872
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
- 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Object queries that are added to the queries and keys in each cross-attention layer. query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each self-attention layer.
2,872
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict
2,872
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
if inputs_embeds is not None: hidden_states = inputs_embeds input_shape = inputs_embeds.size()[:-1] combined_attention_mask = None if attention_mask is not None and combined_attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] combined_attention_mask = combined_attention_mask + _prepare_4d_attention_mask( attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # optional intermediate hidden states intermediate = () if self.config.auxiliary_loss else None
2,872
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
# decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue
2,872
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, combined_attention_mask, encoder_hidden_states, encoder_attention_mask, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=combined_attention_mask, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0]
2,872
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
if self.config.auxiliary_loss: hidden_states = self.layernorm(hidden_states) intermediate += (hidden_states,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # finally, apply layernorm hidden_states = self.layernorm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) # stack intermediate decoder activations if self.config.auxiliary_loss: intermediate = torch.stack(intermediate)
2,872
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate] if v is not None ) return TableTransformerDecoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, intermediate_hidden_states=intermediate, )
2,872
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
class TableTransformerModel(TableTransformerPreTrainedModel): # Copied from transformers.models.detr.modeling_detr.DetrModel.__init__ with Detr->TableTransformer def __init__(self, config: TableTransformerConfig): super().__init__(config) # Create backbone + positional encoding backbone = TableTransformerConvEncoder(config) object_queries = build_position_encoding(config) self.backbone = TableTransformerConvModel(backbone, object_queries) # Create projection layer self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1) self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model) self.encoder = TableTransformerEncoder(config) self.decoder = TableTransformerDecoder(config) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.encoder
2,873
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
def get_decoder(self): return self.decoder def freeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True)
2,873
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
@add_start_docstrings_to_model_forward(TABLE_TRANSFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TableTransformerModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor] = None, decoder_attention_mask: Optional[torch.FloatTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], TableTransformerModelOutput]: r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, TableTransformerModel >>> from huggingface_hub import hf_hub_download >>> from PIL import Image
2,873
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
>>> file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename="example_pdf.png") >>> image = Image.open(file_path).convert("RGB") >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection") >>> model = TableTransformerModel.from_pretrained("microsoft/table-transformer-detection") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs)
2,873
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
>>> # the last hidden states are the final query embeddings of the Transformer decoder >>> # these are of shape (batch_size, num_queries, hidden_size) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 15, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones(((batch_size, height, width)), device=device)
2,873
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
# First, send pixel_values + pixel_mask through Backbone to obtain the features # pixel_values should be of shape (batch_size, num_channels, height, width) # pixel_mask should be of shape (batch_size, height, width) features, position_embeddings_list = self.backbone(pixel_values, pixel_mask) # get final feature map and downsampled mask feature_map, mask = features[-1] if mask is None: raise ValueError("Backbone does not return downsampled pixel mask") # Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) projected_feature_map = self.input_projection(feature_map)
2,873
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
# Third, flatten the feature map + object queries of shape NxCxHxW to NxCxHW, and permute it to NxHWxC # In other words, turn their shape into (batch_size, sequence_length, hidden_size) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) object_queries = position_embeddings_list[-1].flatten(2).permute(0, 2, 1) flattened_mask = mask.flatten(1)
2,873
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
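The flattening step described in the comment above turns the `(batch, d_model, H, W)` projected feature map into the `(batch, H*W, d_model)` sequence the encoder expects. A quick shape sketch with assumed toy sizes:

```python
import torch

projected_feature_map = torch.randn(1, 256, 25, 34)  # hypothetical (batch, d_model, H, W)
mask = torch.ones(1, 25, 34, dtype=torch.bool)

flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)
flattened_mask = mask.flatten(1)

print(flattened_features.shape)  # torch.Size([1, 850, 256]) -- (batch, H*W, d_model)
print(flattened_mask.shape)      # torch.Size([1, 850])
```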
# Fourth, send flattened_features + flattened_mask + object queries through encoder # flattened_features is a Tensor of shape (batch_size, height*width, hidden_size) # flattened_mask is a Tensor of shape (batch_size, height*width) if encoder_outputs is None: encoder_outputs = self.encoder( inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0],
2,873
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, )
2,873
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
# Fifth, send query embeddings + object queries through the decoder (which is conditioned on the encoder output) query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1) queries = torch.zeros_like(query_position_embeddings) # decoder outputs consist of (dec_features, dec_hidden, dec_attn) decoder_outputs = self.decoder( inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs
2,873
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
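In the "Fifth" step the decoder inputs start as zeros; the object-specific signal comes entirely from the learned query position embeddings, which are shared across the batch and simply repeated. A sketch of that expansion, assuming the default 100 queries and `d_model` of 256:

```python
import torch
from torch import nn

batch_size, num_queries, d_model = 2, 100, 256  # assumed defaults

# One learned embedding per object query, shared by every image in the batch.
query_position_embeddings = nn.Embedding(num_queries, d_model)

# (num_queries, d_model) -> (batch_size, num_queries, d_model)
expanded = query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1)
queries = torch.zeros_like(expanded)  # the decoder's inputs_embeds start from zeros

print(expanded.shape, queries.shape)  # torch.Size([2, 100, 256]) torch.Size([2, 100, 256])
```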
return TableTransformerModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, )
2,873
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
class TableTransformerForObjectDetection(TableTransformerPreTrainedModel): # Copied from transformers.models.detr.modeling_detr.DetrForObjectDetection.__init__ with Detr->TableTransformer def __init__(self, config: TableTransformerConfig): super().__init__(config) # DETR encoder-decoder model self.model = TableTransformerModel(config) # Object detection heads self.class_labels_classifier = nn.Linear( config.d_model, config.num_labels + 1 ) # We add one for the "no object" class self.bbox_predictor = TableTransformerMLPPredictionHead( input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) # Initialize weights and apply final processing self.post_init()
2,874
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
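The two heads added in `__init__` map every decoder query to class scores (including the extra "no object" class) and, via the small MLP, to four box values. A shape-only sketch of the classifier, assuming `d_model=256`, 100 queries, and a hypothetical 2 labels:

```python
import torch
from torch import nn

d_model, num_labels, num_queries = 256, 2, 100  # num_labels=2 is an illustrative assumption

class_labels_classifier = nn.Linear(d_model, num_labels + 1)  # +1 for the "no object" class

decoder_output = torch.randn(1, num_queries, d_model)  # stand-in for the decoder's last hidden state
logits = class_labels_classifier(decoder_output)

print(logits.shape)  # torch.Size([1, 100, 3]) -- one score per label plus "no object"
```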
@add_start_docstrings_to_model_forward(TABLE_TRANSFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TableTransformerObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor] = None, decoder_attention_mask: Optional[torch.FloatTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[List[Dict]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], TableTransformerObjectDetectionOutput]: r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*):
2,874
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.
2,874
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
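Following the `labels` description above, training input is a list with one dict per image. A minimal hand-built sketch; the label ids and coordinates are made up, and the boxes are written as normalized `(center_x, center_y, width, height)` values, the format the image processor produces from COCO-style annotations:

```python
import torch

labels = [
    {
        "class_labels": torch.tensor([0, 1], dtype=torch.long),  # two objects in this image
        "boxes": torch.tensor(
            [[0.5, 0.5, 0.4, 0.3], [0.2, 0.7, 0.1, 0.2]], dtype=torch.float
        ),  # normalized (center_x, center_y, width, height)
    }
]

# outputs = model(pixel_values=pixel_values, labels=labels)  # outputs.loss is then populated
```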
Returns: Examples: ```python >>> from huggingface_hub import hf_hub_download >>> from transformers import AutoImageProcessor, TableTransformerForObjectDetection >>> import torch >>> from PIL import Image >>> file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename="example_pdf.png") >>> image = Image.open(file_path).convert("RGB") >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection") >>> model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs)
2,874
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
>>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[ ... 0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) Detected table with confidence 1.0 at location [202.1, 210.59, 1119.22, 385.09] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict
2,874
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
# First, send images through TABLE_TRANSFORMER base model to obtain encoder + decoder outputs outputs = self.model( pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # class logits + predicted bounding boxes logits = self.class_labels_classifier(sequence_output) pred_boxes = self.bbox_predictor(sequence_output).sigmoid()
2,874
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
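After the base model runs, each query yields class logits over `num_labels + 1` classes and a box squashed by the sigmoid into normalized `(center_x, center_y, width, height)` coordinates. A rough sketch of turning one such box back into absolute corner coordinates, mirroring what `post_process_object_detection` does (all numbers are made up):

```python
import torch

image_height, image_width = 480, 640           # assumed original image size
pred_box = torch.tensor([0.5, 0.5, 0.4, 0.3])  # normalized (cx, cy, w, h) after the sigmoid

cx, cy, w, h = pred_box.tolist()
xmin = (cx - w / 2) * image_width
ymin = (cy - h / 2) * image_height
xmax = (cx + w / 2) * image_width
ymax = (cy + h / 2) * image_height

print([round(v, 1) for v in (xmin, ymin, xmax, ymax)])  # [192.0, 168.0, 448.0, 312.0]
```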
loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: outputs_class, outputs_coord = None, None if self.config.auxiliary_loss: intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4] outputs_class = self.class_labels_classifier(intermediate) outputs_coord = self.bbox_predictor(intermediate).sigmoid() loss, loss_dict, auxiliary_outputs = self.loss_function( logits, labels, self.device, pred_boxes, self.config, outputs_class, outputs_coord ) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs return ((loss, loss_dict) + output) if loss is not None else output
2,874
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
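The auxiliary branch above only runs when `config.auxiliary_loss` is enabled, in which case the class and box heads are also applied to every intermediate decoder layer and each layer contributes its own matching loss. A short sketch of switching it on when loading a model for fine-tuning (it uses the same checkpoint as the example earlier in this file):

```python
from transformers import TableTransformerForObjectDetection

# Enable auxiliary decoding losses; config attributes can be overridden via from_pretrained kwargs.
model = TableTransformerForObjectDetection.from_pretrained(
    "microsoft/table-transformer-detection", auxiliary_loss=True
)

print(model.config.auxiliary_loss)  # True
```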
return TableTransformerObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, )
2,874
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
class TableTransformerMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/table_transformer/blob/master/models/table_transformer.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x
2,875
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/modeling_table_transformer.py
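A standalone usage sketch of the prediction head defined above, configured the same way as the `bbox_predictor` (256 -> 256 -> 256 -> 4); the import path follows the source file listed above, and the input tensor is a stand-in for the decoder output:

```python
import torch
from transformers.models.table_transformer.modeling_table_transformer import (
    TableTransformerMLPPredictionHead,
)

head = TableTransformerMLPPredictionHead(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)

decoder_output = torch.randn(1, 100, 256)  # stand-in for (batch_size, num_queries, d_model)
boxes = head(decoder_output).sigmoid()     # squashed to normalized (center_x, center_y, width, height)

print(boxes.shape)  # torch.Size([1, 100, 4])
```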
class TableTransformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TableTransformerModel`]. It is used to instantiate a Table Transformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Table Transformer [microsoft/table-transformer-detection](https://huggingface.co/microsoft/table-transformer-detection) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
2,876
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/configuration_table_transformer.py
Args: use_timm_backbone (`bool`, *optional*, defaults to `True`): Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] API. backbone_config (`PretrainedConfig` or `dict`, *optional*): The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_queries (`int`, *optional*, defaults to 100): Number of object queries, i.e. detection slots. This is the maximal number of objects [`TableTransformerModel`] can detect in a single image. For COCO, we recommend 100 queries. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers.
2,876
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/configuration_table_transformer.py
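As the docstring above notes, `num_queries` caps how many objects a single forward pass can detect, and `d_model` sets the width of every transformer layer. A small sketch of overriding them when building a configuration (the values are illustrative, not those of a released checkpoint):

```python
from transformers import TableTransformerConfig

config = TableTransformerConfig(num_queries=50, d_model=256, encoder_layers=6, decoder_layers=6)

print(config.num_queries)  # 50
print(config.hidden_size)  # 256 -- alias for d_model via the attribute_map
```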
decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in encoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported.
2,876
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/configuration_table_transformer.py
dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. init_xavier_std (`float`, *optional*, defaults to 1): The scaling factor used for the Xavier initialization gain in the HM Attention map module. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details.
2,876
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/configuration_table_transformer.py
decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more details. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
2,876
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/configuration_table_transformer.py
Whether to use pretrained weights for the backbone. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`. class_cost (`float`, *optional*, defaults to 1): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
2,876
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/configuration_table_transformer.py
mask_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the Focal loss in the panoptic segmentation loss. dice_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.1): Relative classification weight of the 'no-object' class in the object detection loss.
2,876
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/configuration_table_transformer.py
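The `*_cost` arguments above weight the three terms of the Hungarian matching cost (classification, L1 box distance, generalized IoU), while the `*_coefficient` arguments weight the corresponding terms of the final loss. A purely illustrative back-of-the-envelope sketch of how one (query, target) pair would be scored with the default weights; the per-term values are made up:

```python
class_cost, bbox_cost, giou_cost = 1.0, 5.0, 2.0  # config defaults

class_term = 0.3   # hypothetical negative probability of the target class
l1_term = 0.12     # hypothetical L1 distance between normalized boxes
giou_term = -0.8   # hypothetical negative generalized IoU

matching_cost = class_cost * class_term + bbox_cost * l1_term + giou_cost * giou_term
print(round(matching_cost, 2))  # -0.7
```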
Examples: ```python >>> from transformers import TableTransformerModel, TableTransformerConfig >>> # Initializing a Table Transformer microsoft/table-transformer-detection style configuration >>> configuration = TableTransformerConfig() >>> # Initializing a model from the microsoft/table-transformer-detection style configuration >>> model = TableTransformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "table-transformer" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", }
2,876
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/configuration_table_transformer.py
# Copied from transformers.models.detr.configuration_detr.DetrConfig.__init__ def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, backbone_kwargs=None, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1,
2,876
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/table_transformer/configuration_table_transformer.py