diff --git a/.gitattributes b/.gitattributes
index fa4ece9ef11f14984b70296c7f53389c624a24ed..6ae36704789fbcc40500301f193fd294c22bfce1 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -441,3 +441,4 @@ janus/lib/libtinfow.so filter=lfs diff=lfs merge=lfs -text
janus/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text
janus/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
janus/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/modeling_oneformer.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+janus/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/janus/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a229b9f10cb497e8ef466e2173a333eb9cdae3a
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py b/janus/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ef70173e30a43fbead6900785b9bfd92b3d38ec
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py
@@ -0,0 +1,57 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+# rely on isort to merge the imports
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_autoformer": ["AutoformerConfig"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_autoformer"] = [
+ "AutoformerForPrediction",
+ "AutoformerModel",
+ "AutoformerPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_autoformer import (
+ AutoformerConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_autoformer import (
+ AutoformerForPrediction,
+ AutoformerModel,
+ AutoformerPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..217b9b2a8e6900355a5cb03c31b4fe99238c4824
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/configuration_autoformer.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/configuration_autoformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..41741492d1dc402cd247ed82c5317b5923487e55
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/configuration_autoformer.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/modeling_autoformer.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/modeling_autoformer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a116497375dc7a9a513b6b89e57c31658ec97545
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/modeling_autoformer.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py b/janus/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5a4356ce8b49b8c772ad9789cde85eff52f579c
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py
@@ -0,0 +1,242 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Autoformer model configuration"""
+
+from typing import List, Optional
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class AutoformerConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of an [`AutoformerModel`]. It is used to instantiate an
+ Autoformer model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the Autoformer
+ [huggingface/autoformer-tourism-monthly](https://huggingface.co/huggingface/autoformer-tourism-monthly)
+ architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ prediction_length (`int`):
+ The prediction length for the decoder. In other words, the prediction horizon of the model.
+ context_length (`int`, *optional*, defaults to `prediction_length`):
+ The context length for the encoder. If unset, the context length will be the same as the
+ `prediction_length`.
+ distribution_output (`string`, *optional*, defaults to `"student_t"`):
+            The distribution emission head for the model. Can be "student_t", "normal" or "negative_binomial".
+ loss (`string`, *optional*, defaults to `"nll"`):
+            The loss function for the model corresponding to the `distribution_output` head. For parametric
+            distributions it is the negative log likelihood ("nll"), which is currently the only supported value.
+ input_size (`int`, *optional*, defaults to 1):
+ The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
+ multivariate targets.
+ lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
+            The lags of the input time series used as covariates, often dictated by the frequency of the data.
+            Defaults to `[1, 2, 3, 4, 5, 6, 7]`.
+        scaling (`bool`, *optional*, defaults to `True`):
+ Whether to scale the input targets.
+ num_time_features (`int`, *optional*, defaults to 0):
+ The number of time features in the input time series.
+ num_dynamic_real_features (`int`, *optional*, defaults to 0):
+ The number of dynamic real valued features.
+ num_static_categorical_features (`int`, *optional*, defaults to 0):
+ The number of static categorical features.
+ num_static_real_features (`int`, *optional*, defaults to 0):
+ The number of static real valued features.
+ cardinality (`list[int]`, *optional*):
+ The cardinality (number of different values) for each of the static categorical features. Should be a list
+ of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
+ `num_static_categorical_features` is > 0.
+ embedding_dimension (`list[int]`, *optional*):
+ The dimension of the embedding for each of the static categorical features. Should be a list of integers,
+ having the same length as `num_static_categorical_features`. Cannot be `None` if
+ `num_static_categorical_features` is > 0.
+ d_model (`int`, *optional*, defaults to 64):
+ Dimensionality of the transformer layers.
+ encoder_layers (`int`, *optional*, defaults to 2):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 2):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 2):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 2):
+ Number of attention heads for each attention layer in the Transformer decoder.
+        encoder_ffn_dim (`int`, *optional*, defaults to 32):
+            Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
+        decoder_ffn_dim (`int`, *optional*, defaults to 32):
+            Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
+ `"relu"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the encoder and decoder.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention and fully connected layers for each encoder layer.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention and fully connected layers for each decoder layer.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability used between the two layers of the feed-forward networks.
+ num_parallel_samples (`int`, *optional*, defaults to 100):
+ The number of samples to generate in parallel for each time step of inference.
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated normal weight initialization distribution.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
+ label_length (`int`, *optional*, defaults to 10):
+ Start token length of the Autoformer decoder, which is used for direct multi-step prediction (i.e.
+ non-autoregressive generation).
+ moving_average (`int`, *optional*, defaults to 25):
+ The window size of the moving average. In practice, it's the kernel size in AvgPool1d of the Decomposition
+ Layer.
+ autocorrelation_factor (`int`, *optional*, defaults to 3):
+ "Attention" (i.e. AutoCorrelation mechanism) factor which is used to find top k autocorrelations delays.
+ It's recommended in the paper to set it to a number between 1 and 5.
+
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoformerConfig, AutoformerModel
+
+ >>> # Initializing a default Autoformer configuration
+ >>> configuration = AutoformerConfig()
+
+    >>> # Initializing a model (with random weights) from the configuration
+ >>> model = AutoformerModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "autoformer"
+ attribute_map = {
+ "hidden_size": "d_model",
+ "num_attention_heads": "encoder_attention_heads",
+ "num_hidden_layers": "encoder_layers",
+ }
+
+ def __init__(
+ self,
+ prediction_length: Optional[int] = None,
+ context_length: Optional[int] = None,
+ distribution_output: str = "student_t",
+ loss: str = "nll",
+ input_size: int = 1,
+ lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
+ scaling: bool = True,
+ num_time_features: int = 0,
+ num_dynamic_real_features: int = 0,
+ num_static_categorical_features: int = 0,
+ num_static_real_features: int = 0,
+ cardinality: Optional[List[int]] = None,
+ embedding_dimension: Optional[List[int]] = None,
+ d_model: int = 64,
+ encoder_attention_heads: int = 2,
+ decoder_attention_heads: int = 2,
+ encoder_layers: int = 2,
+ decoder_layers: int = 2,
+ encoder_ffn_dim: int = 32,
+ decoder_ffn_dim: int = 32,
+ activation_function: str = "gelu",
+ dropout: float = 0.1,
+ encoder_layerdrop: float = 0.1,
+ decoder_layerdrop: float = 0.1,
+ attention_dropout: float = 0.1,
+ activation_dropout: float = 0.1,
+ num_parallel_samples: int = 100,
+ init_std: float = 0.02,
+ use_cache: bool = True,
+ is_encoder_decoder=True,
+ # Autoformer arguments
+ label_length: int = 10,
+ moving_average: int = 25,
+ autocorrelation_factor: int = 3,
+ **kwargs,
+ ):
+ # time series specific configuration
+ self.prediction_length = prediction_length
+ self.context_length = context_length if context_length is not None else prediction_length
+ self.distribution_output = distribution_output
+ self.loss = loss
+ self.input_size = input_size
+ self.num_time_features = num_time_features
+ self.lags_sequence = lags_sequence
+ self.scaling = scaling
+ self.num_dynamic_real_features = num_dynamic_real_features
+ self.num_static_real_features = num_static_real_features
+ self.num_static_categorical_features = num_static_categorical_features
+ if cardinality is not None and num_static_categorical_features > 0:
+ if len(cardinality) != num_static_categorical_features:
+ raise ValueError(
+ "The cardinality should be a list of the same length as `num_static_categorical_features`"
+ )
+ self.cardinality = cardinality
+ else:
+ self.cardinality = [0]
+ if embedding_dimension is not None and num_static_categorical_features > 0:
+ if len(embedding_dimension) != num_static_categorical_features:
+ raise ValueError(
+ "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
+ )
+ self.embedding_dimension = embedding_dimension
+ else:
+ self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
+ self.num_parallel_samples = num_parallel_samples
+
+ # Transformer architecture configuration
+ self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
+ self.d_model = d_model
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_attention_heads = decoder_attention_heads
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.decoder_layers = decoder_layers
+
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+
+ self.activation_function = activation_function
+ self.init_std = init_std
+
+ self.use_cache = use_cache
+
+ # Autoformer
+ self.label_length = label_length
+ self.moving_average = moving_average
+ self.autocorrelation_factor = autocorrelation_factor
+
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
+
+ @property
+ def _number_of_features(self) -> int:
+ return (
+ sum(self.embedding_dimension)
+ + self.num_dynamic_real_features
+ + self.num_time_features
+ + self.num_static_real_features
+ + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
+ )
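+
+
+# Illustrative sketch (not part of the upstream file). With the defaults above,
+# `feature_size` is seven lagged copies of the univariate target plus the two
+# loc/scale features counted by `_number_of_features`:
+#
+#   >>> config = AutoformerConfig(prediction_length=24)
+#   >>> config._number_of_features  # 0 embeddings + 0 + 0 + 0 + 1 * 2 (loc/scale)
+#   2
+#   >>> config.feature_size  # input_size * len(lags_sequence) + 2 = 1 * 7 + 2
+#   9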
diff --git a/janus/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py b/janus/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a5b5f24397be1553db6aaf7c5d81555a1b44e83
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py
@@ -0,0 +1,2152 @@
+# coding=utf-8
+# Copyright (c) 2021 THUML @ Tsinghua University
+# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch Autoformer model."""
+
+import math
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ ModelOutput,
+ SampleTSPredictionOutput,
+ Seq2SeqTSPredictionOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_autoformer import AutoformerConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "AutoformerConfig"
+
+
+@dataclass
+class AutoFormerDecoderOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+
+            If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ trend (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Trend tensor for each time series.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally, if
+            `config.is_encoder_decoder=True`, 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
+ input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ trend: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class AutoformerModelOutput(ModelOutput):
+ """
+ Autoformer model output that contains the additional trend output.
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+
+            If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ trend (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Trend tensor for each time series.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):
+            Shift values of each time series' context window, which are used to give the model inputs of the same
+ magnitude and then used to shift back to the original magnitude.
+ scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*):
+            Scaling values of each time series' context window, which are used to give the model inputs of the same
+ magnitude and then used to rescale back to the original magnitude.
+        static_features (`torch.FloatTensor` of shape `(batch_size, feature_size)`, *optional*):
+            Static features of each time series in a batch, which are copied to the covariates at inference time.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ trend: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_last_hidden_state: Optional[torch.FloatTensor] = None
+ encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
+ loc: Optional[torch.FloatTensor] = None
+ scale: Optional[torch.FloatTensor] = None
+ static_features: Optional[torch.FloatTensor] = None
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesFeatureEmbedder with TimeSeries->Autoformer
+class AutoformerFeatureEmbedder(nn.Module):
+ """
+ Embed a sequence of categorical features.
+
+ Args:
+ cardinalities (`list[int]`):
+ List of cardinalities of the categorical features.
+ embedding_dims (`list[int]`):
+ List of embedding dimensions of the categorical features.
+ """
+
+ def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None:
+ super().__init__()
+
+ self.num_features = len(cardinalities)
+ self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])
+
+ def forward(self, features: torch.Tensor) -> torch.Tensor:
+ if self.num_features > 1:
+ # we slice the last dimension, giving an array of length
+ # self.num_features with shape (N,T) or (N)
+ cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)
+ else:
+ cat_feature_slices = [features]
+
+ return torch.cat(
+ [
+ embed(cat_feature_slice.squeeze(-1))
+ for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)
+ ],
+ dim=-1,
+ )
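+
+# Illustrative usage sketch (not part of the upstream file); shapes are assumed:
+#
+#   >>> embedder = AutoformerFeatureEmbedder(cardinalities=[10, 20], embedding_dims=[3, 5])
+#   >>> features = torch.randint(0, 10, (2, 48, 2))  # (batch, time, num_features)
+#   >>> embedder(features).shape  # the two embeddings are concatenated on the last dim
+#   torch.Size([2, 48, 8])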
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer
+class AutoformerStdScaler(nn.Module):
+ """
+    Standardizes features by calculating the mean and standard deviation along the first dimension, and then
+    normalizes the data by subtracting the mean and dividing by the standard deviation.
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+ self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+ self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Parameters:
+ data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                input data for which the scaling statistics are calculated
+ observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                boolean mask indicating which entries of `data` were observed; the statistics are computed over
+                these entries only.
+ Returns:
+ tuple of `torch.Tensor` of shapes
+ (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+ `(batch_size, 1, num_input_channels)`)
+ """
+ denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
+ denominator = denominator.clamp_min(1.0)
+ loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator
+
+ variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
+ scale = torch.sqrt(variance + self.minimum_scale)
+ return (data - loc) / scale, loc, scale
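+
+# Illustrative sketch (not part of the upstream file): with a fully observed
+# window this reduces to ordinary per-series standardization, with `loc` and
+# `scale` keeping the time dimension for broadcasting:
+#
+#   >>> scaler = AutoformerStdScaler(AutoformerConfig(prediction_length=24))
+#   >>> data = torch.randn(2, 48, 1)
+#   >>> scaled, loc, scale = scaler(data, torch.ones_like(data))
+#   >>> scaled.shape, loc.shape, scale.shape
+#   (torch.Size([2, 48, 1]), torch.Size([2, 1, 1]), torch.Size([2, 1, 1]))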
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer
+class AutoformerMeanScaler(nn.Module):
+ """
+ Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
+ accordingly.
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+ self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+ self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10
+ self.default_scale = config.default_scale if hasattr(config, "default_scale") else None
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Parameters:
+ data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                input data for which the scaling statistics are calculated
+ observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                boolean mask indicating which entries of `data` were observed; the statistics are computed over
+                these entries only.
+ Returns:
+ tuple of `torch.Tensor` of shapes
+ (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+ `(batch_size, 1, num_input_channels)`)
+ """
+ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
+ num_observed = observed_indicator.sum(self.dim, keepdim=True)
+
+ scale = ts_sum / torch.clamp(num_observed, min=1)
+
+ # If `default_scale` is provided, we use it, otherwise we use the scale
+ # of the batch.
+ if self.default_scale is None:
+ batch_sum = ts_sum.sum(dim=0)
+ batch_observations = torch.clamp(num_observed.sum(0), min=1)
+ default_scale = torch.squeeze(batch_sum / batch_observations)
+ else:
+ default_scale = self.default_scale * torch.ones_like(scale)
+
+ # apply default scale where there are no observations
+ scale = torch.where(num_observed > 0, scale, default_scale)
+
+ # ensure the scale is at least `self.minimum_scale`
+ scale = torch.clamp(scale, min=self.minimum_scale)
+ scaled_data = data / scale
+
+ if not self.keepdim:
+ scale = scale.squeeze(dim=self.dim)
+
+ return scaled_data, torch.zeros_like(scale), scale
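+
+# Illustrative sketch (not part of the upstream file): the mean scaler divides
+# by the mean absolute value of the observed context and always returns a zero
+# `loc`, so a constant series of 4.0 is scaled to 1.0:
+#
+#   >>> scaler = AutoformerMeanScaler(AutoformerConfig(prediction_length=24))
+#   >>> data = torch.full((1, 48, 1), 4.0)
+#   >>> scaled, loc, scale = scaler(data, torch.ones_like(data))
+#   >>> scaled.mean().item(), loc.item(), scale.item()
+#   (1.0, 0.0, 4.0)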
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer
+class AutoformerNOPScaler(nn.Module):
+ """
+ Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
+ self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
+
+ def forward(
+ self, data: torch.Tensor, observed_indicator: torch.Tensor = None
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Parameters:
+ data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
+                input data, which is returned unchanged
+ Returns:
+ tuple of `torch.Tensor` of shapes
+ (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
+ `(batch_size, 1, num_input_channels)`)
+ """
+ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
+ loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
+ return data, loc, scale
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average
+def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor:
+ """
+ Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero,
+ meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`.
+
+ Args:
+ input_tensor (`torch.FloatTensor`):
+ Input tensor, of which the average must be computed.
+ weights (`torch.FloatTensor`, *optional*):
+ Weights tensor, of the same shape as `input_tensor`.
+ dim (`int`, *optional*):
+ The dim along which to average `input_tensor`.
+
+ Returns:
+ `torch.FloatTensor`: The tensor with values averaged along the specified `dim`.
+ """
+ if weights is not None:
+ weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor))
+        sum_weights = torch.clamp(weights.sum(dim=dim) if dim is not None else weights.sum(), min=1.0)
+        return (weighted_tensor.sum(dim=dim) if dim is not None else weighted_tensor.sum()) / sum_weights
+ else:
+ return input_tensor.mean(dim=dim)
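+
+# Illustrative sketch (not part of the upstream file): zero-weight positions are
+# masked out instead of propagating NaNs into the average:
+#
+#   >>> x = torch.tensor([1.0, float("nan"), 3.0])
+#   >>> weighted_average(x, weights=torch.tensor([1.0, 0.0, 1.0]), dim=0)
+#   tensor(2.)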
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll
+def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor:
+ """
+ Computes the negative log likelihood loss from input distribution with respect to target.
+ """
+ return -input.log_prob(target)
+
+
+# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Autoformer
+class AutoformerSinusoidalPositionalEmbedding(nn.Embedding):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
+ super().__init__(num_positions, embedding_dim)
+ self.weight = self._init_weight(self.weight)
+
+ @staticmethod
+ def _init_weight(out: nn.Parameter) -> nn.Parameter:
+ """
+        Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
+        the 2nd half of the vector (`[dim // 2:]`).
+ """
+ n_pos, dim = out.shape
+ position_enc = np.array(
+ [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
+ )
+ out.requires_grad = False # set early to avoid an error in pytorch-1.8+
+ sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
+ out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
+ out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
+ out.detach_()
+ return out
+
+ @torch.no_grad()
+ def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
+ bsz, seq_len = input_ids_shape[:2]
+ positions = torch.arange(
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
+ )
+ return super().forward(positions)
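+
+# Illustrative usage sketch (not part of the upstream file): the embedding is
+# indexed purely by position, so only the input *shape* is consumed:
+#
+#   >>> pos_emb = AutoformerSinusoidalPositionalEmbedding(num_positions=128, embedding_dim=64)
+#   >>> pos_emb(torch.Size([2, 48])).shape  # positions 0..47, independent of batch size
+#   torch.Size([48, 64])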
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesValueEmbedding with TimeSeries->Autoformer
+class AutoformerValueEmbedding(nn.Module):
+ def __init__(self, feature_size, d_model):
+ super().__init__()
+ self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False)
+
+ def forward(self, x):
+ return self.value_projection(x)
+
+
+# Class based on
+# https://github.com/thuml/Autoformer/blob/c6a0694ff484753f2d986cc0bb1f99ee850fc1a8/layers/Autoformer_EncDec.py#L39
+# where AutoformerSeriesDecompositionLayer is series_decomp + moving_average
+class AutoformerSeriesDecompositionLayer(nn.Module):
+ """
+ Returns the trend and the seasonal parts of the time series. Calculated as:
+
+ x_trend = AvgPool(Padding(X)) and x_seasonal = X - x_trend
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.kernel_size = config.moving_average
+ self.avg = nn.AvgPool1d(kernel_size=self.kernel_size, stride=1, padding=0)
+
+ def forward(self, x):
+ """Input shape: Batch x Time x EMBED_DIM"""
+        # pad both ends of the time series
+ num_of_pads = (self.kernel_size - 1) // 2
+ front = x[:, 0:1, :].repeat(1, num_of_pads, 1)
+ end = x[:, -1:, :].repeat(1, num_of_pads, 1)
+ x_padded = torch.cat([front, x, end], dim=1)
+
+ # calculate the trend and seasonal part of the series
+ x_trend = self.avg(x_padded.permute(0, 2, 1)).permute(0, 2, 1)
+ x_seasonal = x - x_trend
+ return x_seasonal, x_trend
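+
+# Illustrative sketch (not part of the upstream file): a pure linear ramp is all
+# trend, so the seasonal part (numerically) vanishes away from the borders that
+# are distorted by the replication padding (12 steps per side for kernel 25):
+#
+#   >>> layer = AutoformerSeriesDecompositionLayer(AutoformerConfig(prediction_length=24))
+#   >>> seasonal, trend = layer(torch.arange(48.0).view(1, 48, 1))
+#   >>> bool(seasonal[:, 12:-12].abs().max() < 1e-4)
+#   True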
+
+
+# Class based on
+# https://github.com/thuml/Autoformer/blob/c6a0694ff484753f2d986cc0bb1f99ee850fc1a8/layers/Autoformer_EncDec.py#L6
+# where AutoformerLayernorm is my_Layernorm
+class AutoformerLayernorm(nn.Module):
+ """
+    Specially designed layer normalization for the seasonal part, calculated as: AutoformerLayernorm(x) = nn.LayerNorm(x)
+ - torch.mean(nn.LayerNorm(x))
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.layernorm = nn.LayerNorm(config.d_model)
+
+ def forward(self, x):
+ x_hat = self.layernorm(x)
+ bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1)
+ return x_hat - bias
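+
+# Illustrative sketch (not part of the upstream file): on top of the usual
+# LayerNorm over `d_model`, the output is re-centered to zero mean along time:
+#
+#   >>> ln = AutoformerLayernorm(AutoformerConfig(prediction_length=24))
+#   >>> out = ln(torch.randn(2, 48, 64))  # d_model defaults to 64
+#   >>> bool(out.mean(dim=1).abs().max() < 1e-5)
+#   True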
+
+
+class AutoformerAttention(nn.Module):
+ """
+ AutoCorrelation Mechanism with the following two phases:
+ (1) period-based dependencies discovery (2) time delay aggregation
+    This block replaces the canonical self-attention mechanism.
+ """
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ autocorrelation_factor: int = 3,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ self.autocorrelation_factor = autocorrelation_factor
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states)
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ # (1) period-based dependencies discovery
+ # Resize (truncation or zero filling)
+ queries_time_length = query_states.size(1)
+ values_time_length = value_states.size(1)
+ if queries_time_length > values_time_length:
+            zeros = torch.zeros_like(query_states[:, : (queries_time_length - values_time_length), :]).float()
+ value_states = torch.cat([value_states, zeros], dim=1)
+ key_states = torch.cat([key_states, zeros], dim=1)
+ else:
+ value_states = value_states[:, :queries_time_length, :]
+ key_states = key_states[:, :queries_time_length, :]
+
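+        # By the Wiener-Khinchin / cross-correlation theorem, the inverse FFT of
+        # Q_fft * conj(K_fft) is the circular cross-correlation of queries and
+        # keys along the time axis: attn_weights[:, tau, :] scores queries
+        # against keys delayed by tau, at O(L log L) cost instead of the O(L^2)
+        # of canonical dot-product attention.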
+ query_states_fft = torch.fft.rfft(query_states, n=tgt_len, dim=1)
+ key_states_fft = torch.fft.rfft(key_states, n=tgt_len, dim=1)
+ attn_weights = query_states_fft * torch.conj(key_states_fft)
+ attn_weights = torch.fft.irfft(attn_weights, n=tgt_len, dim=1) # Autocorrelation(Q,K)
+
+ src_len = key_states.size(1)
+ channel = key_states.size(2)
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, channel):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, channel)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, channel)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, channel)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, channel)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, channel)
+ else:
+ attn_weights_reshaped = None
+
+        # (2) time delay aggregation
+ time_length = value_states.size(1)
+ autocorrelations = attn_weights.view(bsz, self.num_heads, tgt_len, channel)
+
+        # find top k autocorrelation delays
+ top_k = int(self.autocorrelation_factor * math.log(time_length))
+ autocorrelations_mean_on_head_channel = torch.mean(autocorrelations, dim=(1, -1)) # bsz x tgt_len
+ if self.training:
+ autocorrelations_mean_on_bsz = torch.mean(autocorrelations_mean_on_head_channel, dim=0)
+ _, top_k_delays_index = torch.topk(autocorrelations_mean_on_bsz, top_k)
+ top_k_autocorrelations = torch.stack(
+ [autocorrelations_mean_on_head_channel[:, top_k_delays_index[i]] for i in range(top_k)], dim=-1
+ )
+ else:
+ top_k_autocorrelations, top_k_delays_index = torch.topk(
+ autocorrelations_mean_on_head_channel, top_k, dim=1
+ )
+
+ top_k_autocorrelations = torch.softmax(top_k_autocorrelations, dim=-1) # bsz x top_k
+
+ # compute aggregation: value_states.roll(delay) * top_k_autocorrelations(delay)
+ if not self.training:
+ # used for compute values_states.roll(delay) in inference
+ tmp_values = value_states.repeat(1, 2, 1)
+ init_index = (
+ torch.arange(time_length)
+ .view(1, -1, 1)
+ .repeat(bsz * self.num_heads, 1, channel)
+ .to(value_states.device)
+ )
+
+ delays_agg = torch.zeros_like(value_states).float() # bsz x time_length x channel
+ for i in range(top_k):
+ # compute value_states roll delay
+ if not self.training:
+ tmp_delay = init_index + top_k_delays_index[:, i].view(-1, 1, 1).repeat(
+ self.num_heads, tgt_len, channel
+ )
+ value_states_roll_delay = torch.gather(tmp_values, dim=1, index=tmp_delay)
+ else:
+ value_states_roll_delay = value_states.roll(shifts=-int(top_k_delays_index[i]), dims=1)
+
+ # aggregation
+ top_k_autocorrelations_at_delay = (
+ top_k_autocorrelations[:, i].view(-1, 1, 1).repeat(self.num_heads, tgt_len, channel)
+ )
+ delays_agg += value_states_roll_delay * top_k_autocorrelations_at_delay
+
+ attn_output = delays_agg.contiguous()
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
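+# Illustrative sketch (not part of the upstream file): the FFT-based
+# autocorrelation used above can be checked against a brute-force circular
+# correlation:
+#
+#   >>> x = torch.randn(16)
+#   >>> x_fft = torch.fft.rfft(x)
+#   >>> fft_corr = torch.fft.irfft(x_fft * torch.conj(x_fft), n=16)
+#   >>> brute = torch.stack([(x * x.roll(-tau)).sum() for tau in range(16)])
+#   >>> bool(torch.allclose(fft_corr, brute, atol=1e-4))
+#   True
+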
+
+class AutoformerEncoderLayer(nn.Module):
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.self_attn = AutoformerAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ autocorrelation_factor=config.autocorrelation_factor,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = AutoformerLayernorm(config)
+ self.decomp1 = AutoformerSeriesDecompositionLayer(config)
+ self.decomp2 = AutoformerSeriesDecompositionLayer(config)
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ attention_mask: torch.FloatTensor,
+ layer_head_mask: torch.FloatTensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ # added layer norm here as an improvement
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, _ = self.decomp1(hidden_states)
+
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states, _ = self.decomp2(hidden_states)
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ if hidden_states.dtype == torch.float16 and (
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
+ ):
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class AutoformerDecoderLayer(nn.Module):
+ def __init__(self, config: AutoformerConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = AutoformerAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ autocorrelation_factor=config.autocorrelation_factor,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = AutoformerAttention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ autocorrelation_factor=config.autocorrelation_factor,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = AutoformerLayernorm(config)
+
+ self.decomp1 = AutoformerSeriesDecompositionLayer(config)
+ self.decomp2 = AutoformerSeriesDecompositionLayer(config)
+ self.decomp3 = AutoformerSeriesDecompositionLayer(config)
+
+ # source: https://github.com/thuml/Autoformer/blob/e6371e24f2ae2dd53e472edefdd5814c5176f864/layers/Autoformer_EncDec.py#L128
+ self.trend_projection = nn.Conv1d(
+ in_channels=self.embed_dim,
+ out_channels=config.feature_size,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ padding_mode="circular",
+ bias=False,
+ )
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+            use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the `present_key_value` state to be used for subsequent
+ decoding.
+ """
+ residual = hidden_states
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states, trend1 = self.decomp1(hidden_states)
+ # added layer norm here as an improvement
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states, trend2 = self.decomp2(hidden_states)
+ # added layer norm here as an improvement
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+ hidden_states, trend3 = self.decomp3(hidden_states)
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ if encoder_hidden_states is not None:
+ residual_trend = trend1 + trend2 + trend3
+ else:
+ residual_trend = trend1 + trend3
+ residual_trend = self.trend_projection(residual_trend.permute(0, 2, 1)).transpose(1, 2)
+ outputs = ((hidden_states, residual_trend),)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class AutoformerPreTrainedModel(PreTrainedModel):
+ config_class = AutoformerConfig
+ base_model_prefix = "model"
+ main_input_name = "past_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, AutoformerSinusoidalPositionalEmbedding):
+ pass
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+AUTOFORMER_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`AutoformerConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+AUTOFORMER_INPUTS_DOCSTRING = r"""
+ Args:
+ past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+            Past values of the time series that serve as context in order to predict the future. These values may
+            contain lags, i.e. additional values from the past which are added in order to serve as "extra context".
+            The `past_values` tensor is what the Transformer encoder gets as input (with optional additional features,
+            such as `static_categorical_features`, `static_real_features`, `past_time_features`).
+
+ The sequence length here is equal to `context_length` + `max(config.lags_sequence)`.
+
+ Missing values need to be replaced with zeros.
+
+ past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`, *optional*):
+ Optional time features, which the model internally will add to `past_values`. These could be things like
+ "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
+ could also be so-called "age" features, which basically help the model know "at which point in life" a
+ time-series is. Age features have small values for distant past time steps and increase monotonically the
+ more we approach the current time step.
+
+            These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
+            the position encodings are learned from scratch internally as parameters of the model, the Time Series
+            Transformer requires these additional time features to be provided.
+
+ The Autoformer only learns additional embeddings for `static_categorical_features`.
+
+ past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
+ `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
+ Optional static categorical features for which the model will learn an embedding, which it will add to the
+ values of the time series.
+
+ Static categorical features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static categorical feature is a time series ID.
+
+ static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
+ Optional static real features which the model will add to the values of the time series.
+
+ Static real features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static real feature is promotion information.
+
+        future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)`, *optional*):
+            Future values of the time series, which serve as labels for the model. The `future_values` tensor is what
+            the Transformer needs to learn to output, given the `past_values`.
+
+ See the demo notebook and code snippets for details.
+
+ Missing values need to be replaced with zeros.
+
+ future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`, *optional*):
+ Optional time features, which the model internally will add to `future_values`. These could be things like
+ "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
+ could also be so-called "age" features, which basically help the model know "at which point in life" a
+ time-series is. Age features have small values for distant past time steps and increase monotonically the
+ more we approach the current time step.
+
+            These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
+            the position encodings are learned from scratch internally as parameters of the model, the Time Series
+            Transformer requires these additional features to be provided.
+
+ The Autoformer only learns additional embeddings for `static_categorical_features`.
+
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on certain token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            Mask to avoid performing attention on certain token indices. By default, a causal mask is used to ensure
+            the model can only attend to previous inputs when predicting the future.
+
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+            Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*).
+            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
+            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerEncoder with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer
+class AutoformerEncoder(AutoformerPreTrainedModel):
+ """
+    Transformer encoder consisting of *config.encoder_layers* self-attention layers. Each layer is an
+    [`AutoformerEncoderLayer`].
+
+ Args:
+ config: AutoformerConfig
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+ if config.prediction_length is None:
+ raise ValueError("The `prediction_length` config needs to be specified.")
+
+ self.value_embedding = AutoformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
+ self.embed_positions = AutoformerSinusoidalPositionalEmbedding(
+ config.context_length + config.prediction_length, config.d_model
+ )
+ self.layers = nn.ModuleList([AutoformerEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ hidden_states = self.value_embedding(inputs_embeds)
+ embed_pos = self.embed_positions(inputs_embeds.size())
+
+ hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+class AutoformerDecoder(AutoformerPreTrainedModel):
+ """
+    Transformer decoder consisting of `config.decoder_layers` layers. Each layer is an [`AutoformerDecoderLayer`].
+
+ Args:
+ config: AutoformerConfig
+ """
+
+ def __init__(self, config: AutoformerConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ if config.prediction_length is None:
+ raise ValueError("The `prediction_length` config needs to be specified.")
+
+ self.value_embedding = AutoformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
+ self.embed_positions = AutoformerSinusoidalPositionalEmbedding(
+ config.context_length + config.prediction_length, config.d_model
+ )
+ self.layers = nn.ModuleList([AutoformerDecoderLayer(config) for _ in range(config.decoder_layers)])
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
+
+ # https://github.com/thuml/Autoformer/blob/e6371e24f2ae2dd53e472edefdd5814c5176f864/models/Autoformer.py#L74
+ self.seasonality_projection = nn.Linear(config.d_model, config.feature_size)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ trend: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, AutoFormerDecoderOutput]:
+ r"""
+ Args:
+ trend (`torch.FloatTensor` of shape `(batch_size, prediction_length, feature_size)`, *optional*):
+ The trend sequence to be fed to the decoder.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If `use_cache` is True, `past_key_values` key value states are returned and can be used to speed up
+ decoding (see `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ input_shape = inputs_embeds.size()[:-1]
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
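+        # the decoder input covers the last `label_length` steps of the context plus the
+        # prediction window, hence the positional offset of `context_length - label_length`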
+ hidden_states = self.value_embedding(inputs_embeds)
+ embed_pos = self.embed_positions(
+ inputs_embeds.size(), past_key_values_length=self.config.context_length - self.config.label_length
+ )
+ hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+            if attn_mask is not None:
+                if attn_mask.size()[0] != len(self.layers):
+                    raise ValueError(
+                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+                        f" {attn_mask.size()[0]}."
+                    )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ (hidden_states, residual_trend) = layer_outputs[0]
+ trend = trend + residual_trend
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # project seasonality representation
+ hidden_states = self.seasonality_projection(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, trend, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return AutoFormerDecoderOutput(
+ last_hidden_state=hidden_states,
+ trend=trend,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Autoformer Model outputting raw hidden-states without any specific head on top.",
+ AUTOFORMER_START_DOCSTRING,
+)
+class AutoformerModel(AutoformerPreTrainedModel):
+ def __init__(self, config: AutoformerConfig):
+ super().__init__(config)
+
+ if config.scaling == "mean" or config.scaling is True:
+ self.scaler = AutoformerMeanScaler(config)
+ elif config.scaling == "std":
+ self.scaler = AutoformerStdScaler(config)
+ else:
+ self.scaler = AutoformerNOPScaler(config)
+
+ if config.num_static_categorical_features > 0:
+ self.embedder = AutoformerFeatureEmbedder(
+ cardinalities=config.cardinality, embedding_dims=config.embedding_dimension
+ )
+
+ # transformer encoder-decoder and mask initializer
+ self.encoder = AutoformerEncoder(config)
+ self.decoder = AutoformerDecoder(config)
+
+ # used for decoder seasonal and trend initialization
+ self.decomposition_layer = AutoformerSeriesDecompositionLayer(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @property
+ def _past_length(self) -> int:
+ return self.config.context_length + max(self.config.lags_sequence)
+
+ def get_lagged_subsequences(
+ self, sequence: torch.Tensor, subsequences_length: int, shift: int = 0
+ ) -> torch.Tensor:
+ """
+        Returns lagged subsequences of a given sequence, as a tensor of shape `(batch_size, subsequences_length,
+        feature_size, indices_length)`. Specifically, `lagged[i, j, :, k] = sequence[i, -indices[k] -
+        subsequences_length + j, :]`.
+
+        Args:
+            sequence (`torch.Tensor` of shape `(batch_size, context_length, feature_size)`):
+                The sequence from which lagged subsequences should be extracted.
+            subsequences_length (`int`):
+                Length of the subsequences to be extracted.
+            shift (`int`, *optional*, defaults to 0):
+                Shift the lags by this amount back in the time index.
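+
+        Example (an illustrative sketch, assuming `config.lags_sequence = [1, 2, 3]` and a
+        `sequence` of length 10 with `subsequences_length=5`):
+
+        ```python
+        >>> lagged = model.get_lagged_subsequences(sequence, subsequences_length=5)
+        >>> # lagged has shape (batch_size, 5, feature_size, 3), and
+        >>> # lagged[..., 0] == sequence[:, 4:9, :]  # lag 1
+        >>> # lagged[..., 1] == sequence[:, 3:8, :]  # lag 2
+        >>> # lagged[..., 2] == sequence[:, 2:7, :]  # lag 3
+        ```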
+ """
+
+ # calculates the indices of the lags by subtracting the shift value from the given lags_sequence
+ indices = [lag - shift for lag in self.config.lags_sequence]
+
+ # checks if the maximum lag plus the length of the subsequences exceeds the length of the input sequence
+ sequence_length = sequence.shape[1]
+ if max(indices) + subsequences_length > sequence_length:
+ raise ValueError(
+ f"lags cannot go further than history length, found lag {max(indices)} "
+ f"while history length is only {sequence_length}"
+ )
+
+ # extracts the lagged subsequences from the input sequence using the calculated indices
+ lagged_values = []
+ for lag_index in indices:
+ begin_index = -lag_index - subsequences_length
+ end_index = -lag_index if lag_index > 0 else None
+ lagged_values.append(sequence[:, begin_index:end_index, ...])
+
+ # return as stacked tensor in the feature dimension
+ return torch.stack(lagged_values, dim=-1)
+
+ def create_network_inputs(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ past_observed_mask: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Creates the inputs for the network given the past and future values, time features, and static features.
+
+ Args:
+ past_values (`torch.Tensor`):
+ A tensor of shape `(batch_size, past_length, input_size)` containing the past values.
+ past_time_features (`torch.Tensor`):
+ A tensor of shape `(batch_size, past_length, num_features)` containing the past time features.
+ static_categorical_features (`Optional[torch.Tensor]`):
+ An optional tensor of shape `(batch_size, num_categorical_features)` containing the static categorical
+ features.
+ static_real_features (`Optional[torch.Tensor]`):
+ An optional tensor of shape `(batch_size, num_real_features)` containing the static real features.
+ past_observed_mask (`Optional[torch.Tensor]`):
+ An optional tensor of shape `(batch_size, past_length, input_size)` containing the mask of observed
+ values in the past.
+            future_values (`Optional[torch.Tensor]`):
+                An optional tensor of shape `(batch_size, future_length, input_size)` containing the future values.
+            future_time_features (`Optional[torch.Tensor]`):
+                An optional tensor of shape `(batch_size, future_length, num_features)` containing the future time
+                features.
+
+ Returns:
+ A tuple containing the following tensors:
+ - reshaped_lagged_sequence (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_lags *
+ input_size)` containing the lagged subsequences of the inputs.
+ - features (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_features)` containing the
+ concatenated static and time features.
+ - loc (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the mean of the input
+ values.
+ - scale (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the std of the input
+ values.
+ - static_feat (`torch.Tensor`): A tensor of shape `(batch_size, num_static_features)` containing the
+ concatenated static features.
+ """
+ # time feature
+ time_feat = (
+ torch.cat(
+ (
+ past_time_features[:, self._past_length - self.config.context_length :, ...],
+ future_time_features,
+ ),
+ dim=1,
+ )
+ if future_values is not None
+ else past_time_features[:, self._past_length - self.config.context_length :, ...]
+ )
+
+ # target
+ if past_observed_mask is None:
+ past_observed_mask = torch.ones_like(past_values)
+
+ context = past_values[:, -self.config.context_length :]
+ observed_context = past_observed_mask[:, -self.config.context_length :]
+ _, loc, scale = self.scaler(context, observed_context)
+
+ inputs = (
+ (torch.cat((past_values, future_values), dim=1) - loc) / scale
+ if future_values is not None
+ else (past_values - loc) / scale
+ )
+
+ # static features
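+        # loc and scale summarize the level and dispersion of the context window; their
+        # logs are fed to the network as additional static real features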
+ log_abs_loc = loc.abs().log1p() if self.config.input_size == 1 else loc.squeeze(1).abs().log1p()
+ log_scale = scale.log() if self.config.input_size == 1 else scale.squeeze(1).log()
+ static_feat = torch.cat((log_abs_loc, log_scale), dim=1)
+
+ if static_real_features is not None:
+ static_feat = torch.cat((static_real_features, static_feat), dim=1)
+ if static_categorical_features is not None:
+ embedded_cat = self.embedder(static_categorical_features)
+ static_feat = torch.cat((embedded_cat, static_feat), dim=1)
+ expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1)
+
+ # all features
+ features = torch.cat((expanded_static_feat, time_feat), dim=-1)
+
+ # lagged features
+ subsequences_length = (
+ self.config.context_length + self.config.prediction_length
+ if future_values is not None
+ else self.config.context_length
+ )
+ lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length)
+ lags_shape = lagged_sequence.shape
+ reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
+
+        if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]:
+            raise ValueError(
+                f"input length {reshaped_lagged_sequence.shape[1]} and time feature length {time_feat.shape[1]} do not match"
+            )
+ return reshaped_lagged_sequence, features, loc, scale, static_feat
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(AUTOFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=AutoformerModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ past_observed_mask: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[AutoformerModelOutput, Tuple]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from huggingface_hub import hf_hub_download
+ >>> import torch
+ >>> from transformers import AutoformerModel
+
+ >>> file = hf_hub_download(
+ ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
+ ... )
+ >>> batch = torch.load(file)
+
+ >>> model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly")
+
+ >>> # during training, one provides both past and future values
+ >>> # as well as possible additional features
+ >>> outputs = model(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... future_values=batch["future_values"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> last_hidden_state = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_inputs, temporal_features, loc, scale, static_feat = self.create_network_inputs(
+ past_values=past_values,
+ past_time_features=past_time_features,
+ past_observed_mask=past_observed_mask,
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ future_values=future_values,
+ future_time_features=future_time_features,
+ )
+
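+        # the encoder only consumes the context window (values plus temporal features);
+        # the prediction window is handled by the decoder below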
+ if encoder_outputs is None:
+ enc_input = torch.cat(
+ (
+ transformer_inputs[:, : self.config.context_length, ...],
+ temporal_features[:, : self.config.context_length, ...],
+ ),
+ dim=-1,
+ )
+ encoder_outputs = self.encoder(
+ inputs_embeds=enc_input,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ if future_values is not None:
+ # Decoder inputs
+ # seasonality and trend from context length
+ seasonal_input, trend_input = self.decomposition_layer(
+ transformer_inputs[:, : self.config.context_length, ...]
+ )
+ mean = (
+ torch.mean(transformer_inputs[:, : self.config.context_length, ...], dim=1)
+ .unsqueeze(1)
+ .repeat(1, self.config.prediction_length, 1)
+ )
+            # take the device from transformer_inputs so this also works when the user
+            # passes precomputed `encoder_outputs` (in which case `enc_input` is not defined)
+            zeros = torch.zeros(
+                [transformer_inputs.shape[0], self.config.prediction_length, transformer_inputs.shape[2]],
+                device=transformer_inputs.device,
+            )
+
+ decoder_input = torch.cat(
+ (
+ torch.cat((seasonal_input[:, -self.config.label_length :, ...], zeros), dim=1),
+ temporal_features[:, self.config.context_length - self.config.label_length :, ...],
+ ),
+ dim=-1,
+ )
+ trend_init = torch.cat(
+ (
+ torch.cat((trend_input[:, -self.config.label_length :, ...], mean), dim=1),
+ temporal_features[:, self.config.context_length - self.config.label_length :, ...],
+ ),
+ dim=-1,
+ )
+
+ decoder_outputs = self.decoder(
+ trend=trend_init,
+ inputs_embeds=decoder_input,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ else:
+ decoder_outputs = AutoFormerDecoderOutput()
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs + (loc, scale, static_feat)
+
+ return AutoformerModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ trend=decoder_outputs.trend,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ loc=loc,
+ scale=scale,
+ static_features=static_feat,
+ )
+
+
+@add_start_docstrings(
+ "The Autoformer Model with a distribution head on top for time-series forecasting.",
+ AUTOFORMER_START_DOCSTRING,
+)
+class AutoformerForPrediction(AutoformerPreTrainedModel):
+ def __init__(self, config: AutoformerConfig):
+ super().__init__(config)
+ self.model = AutoformerModel(config)
+ if config.distribution_output == "student_t":
+ self.distribution_output = StudentTOutput(dim=config.input_size)
+ elif config.distribution_output == "normal":
+ self.distribution_output = NormalOutput(dim=config.input_size)
+ elif config.distribution_output == "negative_binomial":
+ self.distribution_output = NegativeBinomialOutput(dim=config.input_size)
+ else:
+ raise ValueError(f"Unknown distribution output {config.distribution_output}")
+
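+        # projects decoder features to the parameters of the output distribution
+        # (e.g. df, loc and scale for the Student's t distribution)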
+ self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.feature_size)
+ self.target_shape = self.distribution_output.event_shape
+
+ if config.loss == "nll":
+ self.loss = nll
+ else:
+ raise ValueError(f"Unknown loss function {config.loss}")
+
+ # Initialize weights of distribution_output and apply final processing
+ self.post_init()
+
+ def output_params(self, decoder_output):
+ return self.parameter_projection(decoder_output[:, -self.config.prediction_length :, :])
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ @torch.jit.ignore
+ def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
+ sliced_params = params
+ if trailing_n is not None:
+ sliced_params = [p[:, -trailing_n:] for p in params]
+ return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale)
+
+ @add_start_docstrings_to_model_forward(AUTOFORMER_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqTSPredictionOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ past_observed_mask: torch.Tensor,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ future_values: Optional[torch.Tensor] = None,
+ future_time_features: Optional[torch.Tensor] = None,
+ future_observed_mask: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ use_cache: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqTSPredictionOutput, Tuple]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from huggingface_hub import hf_hub_download
+ >>> import torch
+ >>> from transformers import AutoformerForPrediction
+
+ >>> file = hf_hub_download(
+ ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
+ ... )
+ >>> batch = torch.load(file)
+
+ >>> model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
+
+ >>> # during training, one provides both past and future values
+ >>> # as well as possible additional features
+ >>> outputs = model(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... future_values=batch["future_values"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> loss = outputs.loss
+ >>> loss.backward()
+
+ >>> # during inference, one only provides past values
+ >>> # as well as possible additional features
+        >>> # the model generates future samples by decoding the whole prediction window in one pass
+ >>> outputs = model.generate(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+
+ >>> mean_prediction = outputs.sequences.mean(dim=1)
+ ```
+
+        The AutoformerForPrediction can also use `static_real_features`. To do so, set `num_static_real_features` in
+        AutoformerConfig to the number of such features in the dataset (for the tourism_monthly dataset it is equal
+        to 1), then initialize the model and call it as shown below:
+
+        ```python
+ >>> from huggingface_hub import hf_hub_download
+ >>> import torch
+ >>> from transformers import AutoformerConfig, AutoformerForPrediction
+
+ >>> file = hf_hub_download(
+ ... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
+ ... )
+ >>> batch = torch.load(file)
+
+ >>> # check number of static real features
+ >>> num_static_real_features = batch["static_real_features"].shape[-1]
+
+ >>> # load configuration of pretrained model and override num_static_real_features
+ >>> configuration = AutoformerConfig.from_pretrained(
+ ... "huggingface/autoformer-tourism-monthly",
+ ... num_static_real_features=num_static_real_features,
+ ... )
+ >>> # we also need to update feature_size as it is not recalculated
+ >>> configuration.feature_size += num_static_real_features
+
+ >>> model = AutoformerForPrediction(configuration)
+
+ >>> outputs = model(
+ ... past_values=batch["past_values"],
+ ... past_time_features=batch["past_time_features"],
+ ... past_observed_mask=batch["past_observed_mask"],
+ ... static_categorical_features=batch["static_categorical_features"],
+ ... static_real_features=batch["static_real_features"],
+ ... future_values=batch["future_values"],
+ ... future_time_features=batch["future_time_features"],
+ ... )
+ ```
+        """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if future_values is not None:
+ use_cache = False
+
+ outputs = self.model(
+ past_values=past_values,
+ past_time_features=past_time_features,
+ past_observed_mask=past_observed_mask,
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ future_values=future_values,
+ future_time_features=future_time_features,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ return_dict=return_dict,
+ )
+
+ prediction_loss = None
+ params = None
+ if future_values is not None:
+            # outputs[0] is the decoder's last_hidden_state and outputs[1] its trend;
+            # loc is the 3rd-to-last and scale the 2nd-to-last output
+ params = self.output_params(outputs[0] + outputs[1])
+ distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2])
+
+ loss = self.loss(distribution, future_values)
+
+ if future_observed_mask is None:
+ future_observed_mask = torch.ones_like(future_values)
+
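+            # for multivariate targets, a time step is weighted only if all of its
+            # variates were observed, hence the min over the last dimension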
+ if len(self.target_shape) == 0:
+ loss_weights = future_observed_mask
+ else:
+ loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False)
+
+ prediction_loss = weighted_average(loss, weights=loss_weights)
+
+ if not return_dict:
+ outputs = ((params,) + outputs[2:]) if params is not None else outputs[2:]
+ return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs
+
+ return Seq2SeqTSPredictionOutput(
+ loss=prediction_loss,
+ params=params,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ loc=outputs.loc,
+ scale=outputs.scale,
+ static_features=outputs.static_features,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ past_values: torch.Tensor,
+ past_time_features: torch.Tensor,
+ future_time_features: torch.Tensor,
+ past_observed_mask: Optional[torch.Tensor] = None,
+ static_categorical_features: Optional[torch.Tensor] = None,
+ static_real_features: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ ) -> SampleTSPredictionOutput:
+ r"""
+        Generate sequences of sample predictions from a model with a probability distribution head.
+
+ Parameters:
+ past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
+ Past values of the time series, that serve as context in order to predict the future. The sequence size
+ of this tensor must be larger than the `context_length` of the model, since the model will use the
+ larger size to construct lag features, i.e. additional values from the past which are added in order to
+ serve as "extra context".
+
+                The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which,
+                if no `lags_sequence` is configured, equals `config.context_length` + 7 (as by default, the largest
+                look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
+                of the past.
+
+ The `past_values` is what the Transformer encoder gets as input (with optional additional features,
+ such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
+
+                Missing values should be replaced with zeros and indicated via the `past_observed_mask`.
+
+ For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
+ of variates in the time series per time step.
+ past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
+ Required time features, which the model internally will add to `past_values`. These could be things
+ like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
+ These could also be so-called "age" features, which basically help the model know "at which point in
+ life" a time-series is. Age features have small values for distant past time steps and increase
+ monotonically the more we approach the current time step. Holiday features are also a good example of
+ time features.
+
+                These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
+                where the position encodings are learned from scratch internally as parameters of the model, the Time
+                Series Transformer requires these additional time features to be provided. The Time Series Transformer
+                only learns additional embeddings for `static_categorical_features`.
+
+                Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
+                features must be known at prediction time.
+
+                The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
+ Required time features for the prediction window, which the model internally will add to sampled
+ predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
+ (for instance as Fourier features). These could also be so-called "age" features, which basically help
+ the model know "at which point in life" a time-series is. Age features have small values for distant
+ past time steps and increase monotonically the more we approach the current time step. Holiday features
+ are also a good example of time features.
+
+                These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
+                where the position encodings are learned from scratch internally as parameters of the model, the Time
+                Series Transformer requires these additional time features to be provided. The Time Series Transformer
+                only learns additional embeddings for `static_categorical_features`.
+
+                Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
+                features must be known at prediction time.
+
+                The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
+ past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
+ Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
+ in `[0, 1]`:
+
+ - 1 for values that are **observed**,
+ - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
+
+ static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
+ Optional static categorical features for which the model will learn an embedding, which it will add to
+ the values of the time series.
+
+ Static categorical features are features which have the same value for all time steps (static over
+ time).
+
+ A typical example of a static categorical feature is a time series ID.
+ static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
+ Optional static real features which the model will add to the values of the time series.
+
+ Static real features are features which have the same value for all time steps (static over time).
+
+ A typical example of a static real feature is promotion information.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers.
+
+ Return:
+ [`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
+ samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
+ multivariate predictions.
+ """
+ outputs = self(
+ static_categorical_features=static_categorical_features,
+ static_real_features=static_real_features,
+ past_time_features=past_time_features,
+ past_values=past_values,
+ past_observed_mask=past_observed_mask,
+ future_time_features=None,
+ future_values=None,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ use_cache=False,
+ )
+
+ decoder = self.model.get_decoder()
+ enc_last_hidden = outputs.encoder_last_hidden_state
+ loc = outputs.loc
+ scale = outputs.scale
+ static_feat = outputs.static_features
+
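+        # draw num_parallel_samples trajectories per series by tiling the batch
+        # dimension: every repeated tensor has shape (batch_size * num_parallel_samples, ...)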
+ num_parallel_samples = self.config.num_parallel_samples
+ repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0)
+ repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ repeated_past_values = (
+ past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc
+ ) / repeated_scale
+
+ time_features = torch.cat((past_time_features, future_time_features), dim=1)
+
+ expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_features.shape[1], -1)
+ features = torch.cat((expanded_static_feat, time_features), dim=-1)
+ repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0)
+
+ lagged_sequence = self.model.get_lagged_subsequences(
+ sequence=repeated_past_values, subsequences_length=self.config.context_length
+ )
+ lags_shape = lagged_sequence.shape
+ reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
+ seasonal_input, trend_input = self.model.decomposition_layer(reshaped_lagged_sequence)
+
+ mean = torch.mean(reshaped_lagged_sequence, dim=1).unsqueeze(1).repeat(1, self.config.prediction_length, 1)
+ zeros = torch.zeros(
+ [reshaped_lagged_sequence.shape[0], self.config.prediction_length, reshaped_lagged_sequence.shape[2]],
+ device=reshaped_lagged_sequence.device,
+ )
+
+ decoder_input = torch.cat(
+ (
+ torch.cat((seasonal_input[:, -self.config.label_length :, ...], zeros), dim=1),
+ repeated_features[:, -self.config.prediction_length - self.config.label_length :, ...],
+ ),
+ dim=-1,
+ )
+ trend_init = torch.cat(
+ (
+ torch.cat((trend_input[:, -self.config.label_length :, ...], mean), dim=1),
+ repeated_features[:, -self.config.prediction_length - self.config.label_length :, ...],
+ ),
+ dim=-1,
+ )
+ decoder_outputs = decoder(
+ trend=trend_init, inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden
+ )
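+        # the decoder produces the whole prediction window in one pass (no step-by-step
+        # loop); samples are then drawn from the resulting output distribution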
+ decoder_last_hidden = decoder_outputs.last_hidden_state
+ trend = decoder_outputs.trend
+ params = self.output_params(decoder_last_hidden + trend)
+ distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale)
+ future_samples = distr.sample()
+
+ return SampleTSPredictionOutput(
+ sequences=future_samples.reshape(
+ (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape,
+ )
+ )
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f10b3fe1215d3785642e6c5a2f5d5deb70792dac
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/configuration_bros.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/configuration_bros.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3feaa3e0bdc1959fc7d7829362c616f33795df3a
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/configuration_bros.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/modeling_bros.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/modeling_bros.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3216bb755839ce082a38becbbc3dd4b829d5f49f
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/modeling_bros.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bros/configuration_bros.py b/janus/lib/python3.10/site-packages/transformers/models/bros/configuration_bros.py
new file mode 100644
index 0000000000000000000000000000000000000000..84c9989f309fb782b47aed37f9728dd7a26ba6b8
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/bros/configuration_bros.py
@@ -0,0 +1,138 @@
+# coding=utf-8
+# Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Bros model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class BrosConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`BrosModel`] or a [`TFBrosModel`]. It is used to
+ instantiate a Bros model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the Bros
+ [jinho8345/bros-base-uncased](https://huggingface.co/jinho8345/bros-base-uncased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the Bros model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`BrosModel`] or [`TFBrosModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the `token_type_ids` passed when calling [`BrosModel`] or [`TFBrosModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ The index of the padding token in the token vocabulary.
+ dim_bbox (`int`, *optional*, defaults to 8):
+            The dimension of the bounding box coordinates. (x0, y0, x1, y0, x1, y1, x0, y1)
+ bbox_scale (`float`, *optional*, defaults to 100.0):
+ The scale factor of the bounding box coordinates.
+ n_relations (`int`, *optional*, defaults to 1):
+            The number of relations for the SpadeEE (entity extraction) and SpadeEL (entity linking) heads.
+ classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the classifier head.
+
+
+ Examples:
+
+ ```python
+ >>> from transformers import BrosConfig, BrosModel
+
+ >>> # Initializing a BROS jinho8345/bros-base-uncased style configuration
+ >>> configuration = BrosConfig()
+
+ >>> # Initializing a model from the jinho8345/bros-base-uncased style configuration
+ >>> model = BrosModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "bros"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ dim_bbox=8,
+ bbox_scale=100.0,
+ n_relations=1,
+ classifier_dropout_prob=0.1,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_size=vocab_size,
+ hidden_size=hidden_size,
+ num_hidden_layers=num_hidden_layers,
+ num_attention_heads=num_attention_heads,
+ intermediate_size=intermediate_size,
+ hidden_act=hidden_act,
+ hidden_dropout_prob=hidden_dropout_prob,
+ attention_probs_dropout_prob=attention_probs_dropout_prob,
+ max_position_embeddings=max_position_embeddings,
+ type_vocab_size=type_vocab_size,
+ initializer_range=initializer_range,
+ layer_norm_eps=layer_norm_eps,
+ pad_token_id=pad_token_id,
+ **kwargs,
+ )
+
+ self.dim_bbox = dim_bbox
+ self.bbox_scale = bbox_scale
+ self.n_relations = n_relations
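+        # Derived embedding sizes; with the defaults (hidden_size=768, num_attention_heads=12,
+        # dim_bbox=8) these work out to 192, 24 and 64 respectively.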
+ self.dim_bbox_sinusoid_emb_2d = self.hidden_size // 4
+ self.dim_bbox_sinusoid_emb_1d = self.dim_bbox_sinusoid_emb_2d // self.dim_bbox
+ self.dim_bbox_projection = self.hidden_size // self.num_attention_heads
+ self.classifier_dropout_prob = classifier_dropout_prob
+
+
+__all__ = ["BrosConfig"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bros/modeling_bros.py b/janus/lib/python3.10/site-packages/transformers/models/bros/modeling_bros.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e1e86c0b39f7e427374dc7ef3f6eb3c63f4dd48
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/bros/modeling_bros.py
@@ -0,0 +1,1323 @@
+# coding=utf-8
+# Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch Bros model."""
+
+import math
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_bros import BrosConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "jinho8345/bros-base-uncased"
+_CONFIG_FOR_DOC = "BrosConfig"
+
+
+BROS_START_DOCSTRING = r"""
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`BrosConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BROS_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`BrosProcessor`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+
+        bbox (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
+ Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
+ (x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
+ bounding box.
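+            For example, `bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, sequence_length, 1)` assigns the
+            full (normalized) page to every token, as in the usage examples below.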
+
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ bbox_first_token_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:
+
+            - 1 for the first token of each bounding box,
+            - 0 for all other tokens.
+
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@dataclass
+class BrosSpadeOutput(ModelOutput):
+ """
+ Base class for outputs of token classification models.
+
+ Args:
+        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ initial_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
+ Classification scores for entity initial tokens (before SoftMax).
+ subsequent_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length+1)`):
+ Classification scores for entity sequence tokens (before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ initial_token_logits: torch.FloatTensor = None
+ subsequent_token_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class BrosPositionalEmbedding1D(nn.Module):
+ # Reference: https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py#L15
+
+ def __init__(self, config):
+        super().__init__()
+
+ self.dim_bbox_sinusoid_emb_1d = config.dim_bbox_sinusoid_emb_1d
+
+ inv_freq = 1 / (
+ 10000 ** (torch.arange(0.0, self.dim_bbox_sinusoid_emb_1d, 2.0) / self.dim_bbox_sinusoid_emb_1d)
+ )
+ self.register_buffer("inv_freq", inv_freq)
+
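+    # Maps a position tensor of shape (b1, b2, b3) to sinusoidal embeddings of shape
+    # (b1, b2, b3, dim_bbox_sinusoid_emb_1d): sine features in the first half, cosine in the second.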
+ def forward(self, pos_seq: torch.Tensor) -> torch.Tensor:
+ seq_size = pos_seq.size()
+ b1, b2, b3 = seq_size
+ sinusoid_inp = pos_seq.view(b1, b2, b3, 1) * self.inv_freq.view(1, 1, 1, self.dim_bbox_sinusoid_emb_1d // 2)
+ pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
+ return pos_emb
+
+
+class BrosPositionalEmbedding2D(nn.Module):
+ def __init__(self, config):
+        super().__init__()
+
+ self.dim_bbox = config.dim_bbox
+ self.x_pos_emb = BrosPositionalEmbedding1D(config)
+ self.y_pos_emb = BrosPositionalEmbedding1D(config)
+
+ def forward(self, bbox: torch.Tensor) -> torch.Tensor:
+ stack = []
+ for i in range(self.dim_bbox):
+ if i % 2 == 0:
+ stack.append(self.x_pos_emb(bbox[..., i]))
+ else:
+ stack.append(self.y_pos_emb(bbox[..., i]))
+ bbox_pos_emb = torch.cat(stack, dim=-1)
+ return bbox_pos_emb
+
+
+class BrosBboxEmbeddings(nn.Module):
+ def __init__(self, config):
+        super().__init__()
+ self.bbox_sinusoid_emb = BrosPositionalEmbedding2D(config)
+ self.bbox_projection = nn.Linear(config.dim_bbox_sinusoid_emb_2d, config.dim_bbox_projection, bias=False)
+
+ def forward(self, bbox: torch.Tensor):
+ bbox_t = bbox.transpose(0, 1)
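+        # Pairwise differences between all boxes, shape (seq_len, seq_len, batch_size, dim_bbox):
+        # entry (i, j) is the position of box j relative to box i.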
+ bbox_pos = bbox_t[None, :, :, :] - bbox_t[:, None, :, :]
+ bbox_pos_emb = self.bbox_sinusoid_emb(bbox_pos)
+ bbox_pos_emb = self.bbox_projection(bbox_pos_emb)
+
+ return bbox_pos_emb
+
+
+class BrosTextEmbeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
+ self.register_buffer(
+ "token_type_ids",
+ torch.zeros(
+ self.position_ids.size(),
+ dtype=torch.long,
+ device=self.position_ids.device,
+ ),
+ persistent=False,
+ )
+
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ past_key_values_length: int = 0,
+ ) -> torch.Tensor:
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class BrosSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor):
+ new_x_shape = x.size()[:-1] + (
+ self.num_attention_heads,
+ self.attention_head_size,
+ )
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ bbox_pos_emb: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+        output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ seq_length = hidden_states.size()[1]
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ # bbox positional encoding
+ batch_size, n_head, seq_length, d_head = query_layer.shape
+ bbox_pos_emb = bbox_pos_emb.view(seq_length, seq_length, batch_size, d_head)
+ bbox_pos_emb = bbox_pos_emb.permute([2, 0, 1, 3])
+ bbox_pos_scores = torch.einsum("bnid,bijd->bnij", (query_layer, bbox_pos_emb))
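+        # bbox_pos_scores: (batch_size, num_heads, seq_len, seq_len), one score per token pair,
+        # computed from the relative bounding-box embedding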
+
+ attention_scores = attention_scores + bbox_pos_scores
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the BrosModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Bros
+class BrosSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class BrosAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.self = BrosSelfAttention(config)
+ self.output = BrosSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads,
+ self.self.num_attention_heads,
+ self.self.attention_head_size,
+ self.pruned_heads,
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ bbox_pos_emb: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states=hidden_states,
+ bbox_pos_emb=bbox_pos_emb,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Bros
+class BrosIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+class BrosOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class BrosLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = BrosAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise Exception(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = BrosAttention(config)
+ self.intermediate = BrosIntermediate(config)
+ self.output = BrosOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ bbox_pos_emb: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ bbox_pos_emb=bbox_pos_emb,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+            if not hasattr(self, "crossattention"):
+                raise ValueError(
+                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
+                )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk,
+ self.chunk_size_feed_forward,
+ self.seq_len_dim,
+ attention_output,
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+class BrosEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([BrosLayer(config) for _ in range(config.num_hidden_layers)])
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ bbox_pos_emb: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if getattr(self.config, "gradient_checkpointing", False) and self.training:
+ if use_cache:
+ logger.warning(
+ "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
+ "`use_cache=False`..."
+ )
+ use_cache = False
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ bbox_pos_emb,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states=hidden_states,
+ bbox_pos_emb=bbox_pos_emb,
+ attention_mask=attention_mask,
+ head_mask=layer_head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Bros
+class BrosPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class BrosRelationExtractor(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.n_relations = config.n_relations
+ self.backbone_hidden_size = config.hidden_size
+ self.head_hidden_size = config.hidden_size
+ self.classifier_dropout_prob = config.classifier_dropout_prob
+
+ self.drop = nn.Dropout(self.classifier_dropout_prob)
+ self.query = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size)
+
+ self.key = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size)
+
+ self.dummy_node = nn.Parameter(torch.zeros(1, self.backbone_hidden_size))
+
+ def forward(self, query_layer: torch.Tensor, key_layer: torch.Tensor):
+ query_layer = self.query(self.drop(query_layer))
+
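+        # Append a learned dummy key so every query has an explicit "no relation" target to point to.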
+ dummy_vec = self.dummy_node.unsqueeze(0).repeat(1, key_layer.size(1), 1)
+ key_layer = torch.cat([key_layer, dummy_vec], axis=0)
+ key_layer = self.key(self.drop(key_layer))
+
+ query_layer = query_layer.view(
+ query_layer.size(0), query_layer.size(1), self.n_relations, self.head_hidden_size
+ )
+ key_layer = key_layer.view(key_layer.size(0), key_layer.size(1), self.n_relations, self.head_hidden_size)
+
+ relation_score = torch.matmul(
+ query_layer.permute(2, 1, 0, 3), key_layer.permute(2, 1, 3, 0)
+ ) # equivalent to torch.einsum("ibnd,jbnd->nbij", (query_layer, key_layer))
+
+ return relation_score
+
+
+class BrosPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = BrosConfig
+ base_model_prefix = "bros"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+@add_start_docstrings(
+ "The bare Bros Model transformer outputting raw hidden-states without any specific head on top.",
+ BROS_START_DOCSTRING,
+)
+class BrosModel(BrosPreTrainedModel):
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = BrosTextEmbeddings(config)
+ self.bbox_embeddings = BrosBboxEmbeddings(config)
+ self.encoder = BrosEncoder(config)
+
+ self.pooler = BrosPooler(config) if add_pooling_layer else None
+
+ self.init_weights()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ bbox: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import torch
+ >>> from transformers import BrosProcessor, BrosModel
+
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
+
+ >>> model = BrosModel.from_pretrained("jinho8345/bros-base-uncased")
+
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
+ >>> encoding["bbox"] = bbox
+
+ >>> outputs = model(**encoding)
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if bbox is None:
+ raise ValueError("You have to specify bbox")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+
+ if token_type_ids is None:
+ if hasattr(self.embeddings, "token_type_ids"):
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+
+        # if bbox has 2 points (4 floats) per token, convert it to 4 points (8 floats) per token
+ if bbox.shape[-1] == 4:
+ bbox = bbox[:, :, [0, 1, 2, 1, 2, 3, 0, 3]]
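+            # i.e. (x0, y0, x1, y1) -> (x0, y0, x1, y0, x1, y1, x0, y1): the top-left, top-right,
+            # bottom-right and bottom-left corners of the axis-aligned box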
+ scaled_bbox = bbox * self.config.bbox_scale
+ bbox_position_embeddings = self.bbox_embeddings(scaled_bbox)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ bbox_pos_emb=bbox_position_embeddings,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Bros Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ BROS_START_DOCSTRING,
+)
+class BrosForTokenClassification(BrosPreTrainedModel):
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.bros = BrosModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ self.init_weights()
+
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ bbox: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ bbox_first_token_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import torch
+ >>> from transformers import BrosProcessor, BrosForTokenClassification
+
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
+
+ >>> model = BrosForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
+
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
+ >>> encoding["bbox"] = bbox
+
+ >>> outputs = model(**encoding)
+ ```"""
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.bros(
+ input_ids,
+ bbox=bbox,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ if bbox_first_token_mask is not None:
+ bbox_first_token_mask = bbox_first_token_mask.view(-1)
+ loss = loss_fct(
+ logits.view(-1, self.num_labels)[bbox_first_token_mask], labels.view(-1)[bbox_first_token_mask]
+ )
+ else:
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Bros Model with a token classification head on top (initial_token_layers and subsequent_token_layer on top of the
+ hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. The initial_token_classifier is used to
+ predict the first token of each entity, and the subsequent_token_classifier is used to predict the subsequent
+    tokens within an entity. Compared to BrosForTokenClassification, this model is more robust to serialization
+    errors, since it predicts each entity's next token directly from the current token rather than relying on the
+    serialized token order.
+ """,
+ BROS_START_DOCSTRING,
+)
+class BrosSpadeEEForTokenClassification(BrosPreTrainedModel):
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+ self.num_labels = config.num_labels
+ self.n_relations = config.n_relations
+ self.backbone_hidden_size = config.hidden_size
+
+ self.bros = BrosModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob
+ )
+
+ # Initial token classification for Entity Extraction (NER)
+ self.initial_token_classifier = nn.Sequential(
+ nn.Dropout(classifier_dropout),
+ nn.Linear(config.hidden_size, config.hidden_size),
+ nn.Dropout(classifier_dropout),
+ nn.Linear(config.hidden_size, config.num_labels),
+ )
+
+ # Subsequent token classification for Entity Extraction (NER)
+ self.subsequent_token_classifier = BrosRelationExtractor(config)
+
+ self.init_weights()
+
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=BrosSpadeOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ bbox: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ bbox_first_token_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ initial_token_labels: Optional[torch.Tensor] = None,
+ subsequent_token_labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BrosSpadeOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import torch
+ >>> from transformers import BrosProcessor, BrosSpadeEEForTokenClassification
+
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
+
+ >>> model = BrosSpadeEEForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
+
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
+ >>> encoding["bbox"] = bbox
+
+ >>> outputs = model(**encoding)
+ ```"""
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.bros(
+ input_ids=input_ids,
+ bbox=bbox,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_states = outputs[0]
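+        # BrosRelationExtractor expects sequence-first tensors, so switch to (seq_len, batch_size, hidden_size)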
+ last_hidden_states = last_hidden_states.transpose(0, 1).contiguous()
+ initial_token_logits = self.initial_token_classifier(last_hidden_states).transpose(0, 1).contiguous()
+ subsequent_token_logits = self.subsequent_token_classifier(last_hidden_states, last_hidden_states).squeeze(0)
+
+ # make subsequent token (sequence token classification) mask
+ inv_attention_mask = 1 - attention_mask
+ batch_size, max_seq_length = inv_attention_mask.shape
+ device = inv_attention_mask.device
+ invalid_token_mask = torch.cat([inv_attention_mask, torch.zeros([batch_size, 1]).to(device)], axis=1).bool()
+ subsequent_token_logits = subsequent_token_logits.masked_fill(
+ invalid_token_mask[:, None, :], torch.finfo(subsequent_token_logits.dtype).min
+ )
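+        # a token must not predict itself as its own subsequent token, so mask the diagonal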
+ self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device).bool()
+ subsequent_token_logits = subsequent_token_logits.masked_fill(
+ self_token_mask[None, :, :], torch.finfo(subsequent_token_logits.dtype).min
+ )
+ subsequent_token_mask = attention_mask.view(-1).bool()
+
+ loss = None
+ if initial_token_labels is not None and subsequent_token_labels is not None:
+ loss_fct = CrossEntropyLoss()
+
+ # get initial token loss
+ initial_token_labels = initial_token_labels.view(-1)
+ if bbox_first_token_mask is not None:
+ bbox_first_token_mask = bbox_first_token_mask.view(-1)
+ initial_token_loss = loss_fct(
+ initial_token_logits.view(-1, self.num_labels)[bbox_first_token_mask],
+ initial_token_labels[bbox_first_token_mask],
+ )
+ else:
+ initial_token_loss = loss_fct(initial_token_logits.view(-1, self.num_labels), initial_token_labels)
+
+ subsequent_token_labels = subsequent_token_labels.view(-1)
+ subsequent_token_loss = loss_fct(
+ subsequent_token_logits.view(-1, max_seq_length + 1)[subsequent_token_mask],
+ subsequent_token_labels[subsequent_token_mask],
+ )
+
+ loss = initial_token_loss + subsequent_token_loss
+
+ if not return_dict:
+ output = (initial_token_logits, subsequent_token_logits) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return BrosSpadeOutput(
+ loss=loss,
+ initial_token_logits=initial_token_logits,
+ subsequent_token_logits=subsequent_token_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+    Bros Model with a token classification head on top (an entity_linker layer on top of the hidden-states output)
+    e.g. for Entity-Linking. The entity_linker is used to predict links from one entity to another.
+ """,
+ BROS_START_DOCSTRING,
+)
+class BrosSpadeELForTokenClassification(BrosPreTrainedModel):
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+ self.num_labels = config.num_labels
+ self.n_relations = config.n_relations
+ self.backbone_hidden_size = config.hidden_size
+
+ self.bros = BrosModel(config)
+
+ self.entity_linker = BrosRelationExtractor(config)
+
+ self.init_weights()
+
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ bbox: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ bbox_first_token_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import torch
+ >>> from transformers import BrosProcessor, BrosSpadeELForTokenClassification
+
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
+
+ >>> model = BrosSpadeELForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
+
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
+ >>> encoding["bbox"] = bbox
+
+ >>> outputs = model(**encoding)
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.bros(
+ input_ids=input_ids,
+ bbox=bbox,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_states = outputs[0]
+ last_hidden_states = last_hidden_states.transpose(0, 1).contiguous()
+
+ logits = self.entity_linker(last_hidden_states, last_hidden_states).squeeze(0)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+
+ batch_size, max_seq_length = attention_mask.shape
+ device = attention_mask.device
+
+ self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device).bool()
+
+ mask = bbox_first_token_mask.view(-1)
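+            # Only first tokens of bounding boxes take part in the loss; invalidate every other
+            # token as a link target (the appended dummy column stays valid as the "no link" target).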
+ bbox_first_token_mask = torch.cat(
+ [
+ ~bbox_first_token_mask,
+ torch.zeros([batch_size, 1], dtype=torch.bool).to(device),
+ ],
+ axis=1,
+ )
+ logits = logits.masked_fill(bbox_first_token_mask[:, None, :], torch.finfo(logits.dtype).min)
+ logits = logits.masked_fill(self_token_mask[None, :, :], torch.finfo(logits.dtype).min)
+
+ loss = loss_fct(logits.view(-1, max_seq_length + 1)[mask], labels.view(-1)[mask])
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+__all__ = [
+ "BrosPreTrainedModel",
+ "BrosModel",
+ "BrosForTokenClassification",
+ "BrosSpadeEEForTokenClassification",
+ "BrosSpadeELForTokenClassification",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/bros/processing_bros.py b/janus/lib/python3.10/site-packages/transformers/models/bros/processing_bros.py
new file mode 100644
index 0000000000000000000000000000000000000000..4687e7f8a86ae5ab4d1339517622f52023c57499
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/bros/processing_bros.py
@@ -0,0 +1,112 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for Bros.
+"""
+
+from typing import List, Optional, Union
+
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+from ...utils import TensorType
+
+
+class BrosProcessor(ProcessorMixin):
+ r"""
+ Constructs a Bros processor which wraps a BERT tokenizer.
+
+ [`BrosProcessor`] offers all the functionalities of [`BertTokenizerFast`]. See the docstring of
+ [`~BrosProcessor.__call__`] and [`~BrosProcessor.decode`] for more information.
+
+ Args:
+ tokenizer (`BertTokenizerFast`, *optional*):
+ An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
+ """
+
+ attributes = ["tokenizer"]
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
+
+ def __init__(self, tokenizer=None, **kwargs):
+ if tokenizer is None:
+ raise ValueError("You need to specify a `tokenizer`.")
+
+ super().__init__(tokenizer)
+
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ This method uses [`BertTokenizerFast.__call__`] to prepare text for the model.
+
+ Please refer to the docstring of that method for more information.
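+
+ Example (a minimal sketch; the checkpoint name reuses the one from the Bros model docstrings):
+
+ ```python
+ >>> from transformers import BrosProcessor
+
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
+ ```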
+ """
+ encoding = self.tokenizer(
+ text=text,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+
+ return encoding
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names))
+
+
+__all__ = ["BrosProcessor"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb7896395a8ee90b3d65abbd031d3a15b84c768d
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/configuration_clvp.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/configuration_clvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b6f4a2534098cb9b67600481d27ebd4333b3692
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/configuration_clvp.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/feature_extraction_clvp.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/feature_extraction_clvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d726fa01db61229c332daccd61264ae8064eacc6
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/feature_extraction_clvp.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/modeling_clvp.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/modeling_clvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5b83c1aee8b69ed55a49e67052bdc110f1e16d2
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/modeling_clvp.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ceb6e2fb1c934a0e901cea9f61dd77def45783e
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/clvp/number_normalizer.py b/janus/lib/python3.10/site-packages/transformers/models/clvp/number_normalizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..78240097277fee950ef8e49b4c8e05245463ed05
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/clvp/number_normalizer.py
@@ -0,0 +1,237 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""English Normalizer class for CLVP."""
+
+import re
+
+
+class EnglishNormalizer:
+ def __init__(self):
+ # List of (regular expression, replacement) pairs for abbreviations:
+ self._abbreviations = [
+ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1])
+ for x in [
+ ("mrs", "misess"),
+ ("mr", "mister"),
+ ("dr", "doctor"),
+ ("st", "saint"),
+ ("co", "company"),
+ ("jr", "junior"),
+ ("maj", "major"),
+ ("gen", "general"),
+ ("drs", "doctors"),
+ ("rev", "reverend"),
+ ("lt", "lieutenant"),
+ ("hon", "honorable"),
+ ("sgt", "sergeant"),
+ ("capt", "captain"),
+ ("esq", "esquire"),
+ ("ltd", "limited"),
+ ("col", "colonel"),
+ ("ft", "fort"),
+ ]
+ ]
+
+ self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
+ self.teens = [
+ "ten",
+ "eleven",
+ "twelve",
+ "thirteen",
+ "fourteen",
+ "fifteen",
+ "sixteen",
+ "seventeen",
+ "eighteen",
+ "nineteen",
+ ]
+ self.tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
+
+ def number_to_words(self, num: int) -> str:
+ """
+ Converts numbers (`int`) to words (`str`).
+
+ Please note that it only supports numbers up to "nine hundred ninety-nine quadrillion, nine hundred ninety-nine
+ trillion, nine hundred ninety-nine billion, nine hundred ninety-nine million, nine hundred ninety-nine
+ thousand, nine hundred ninety-nine", i.e. `number_to_words(999_999_999_999_999_999)`.
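+
+ Example (illustrative):
+
+ ```python
+ >>> EnglishNormalizer().number_to_words(1234)
+ 'one thousand, two hundred thirty-four'
+ ```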
+ """
+ if num == 0:
+ return "zero"
+ elif num < 0:
+ return "minus " + self.number_to_words(abs(num))
+ elif num < 10:
+ return self.ones[num]
+ elif num < 20:
+ return self.teens[num - 10]
+ elif num < 100:
+ return self.tens[num // 10] + ("-" + self.number_to_words(num % 10) if num % 10 != 0 else "")
+ elif num < 1000:
+ return (
+ self.ones[num // 100] + " hundred" + (" " + self.number_to_words(num % 100) if num % 100 != 0 else "")
+ )
+ elif num < 1_000_000:
+ return (
+ self.number_to_words(num // 1000)
+ + " thousand"
+ + (", " + self.number_to_words(num % 1000) if num % 1000 != 0 else "")
+ )
+ elif num < 1_000_000_000:
+ return (
+ self.number_to_words(num // 1_000_000)
+ + " million"
+ + (", " + self.number_to_words(num % 1_000_000) if num % 1_000_000 != 0 else "")
+ )
+ elif num < 1_000_000_000_000:
+ return (
+ self.number_to_words(num // 1_000_000_000)
+ + " billion"
+ + (", " + self.number_to_words(num % 1_000_000_000) if num % 1_000_000_000 != 0 else "")
+ )
+ elif num < 1_000_000_000_000_000:
+ return (
+ self.number_to_words(num // 1_000_000_000_000)
+ + " trillion"
+ + (", " + self.number_to_words(num % 1_000_000_000_000) if num % 1_000_000_000_000 != 0 else "")
+ )
+ elif num < 1_000_000_000_000_000_000:
+ return (
+ self.number_to_words(num // 1_000_000_000_000_000)
+ + " quadrillion"
+ + (
+ ", " + self.number_to_words(num % 1_000_000_000_000_000)
+ if num % 1_000_000_000_000_000 != 0
+ else ""
+ )
+ )
+ else:
+ return "number out of range"
+
+ def convert_to_ascii(self, text: str) -> str:
+ """
+ Converts unicode to ascii
+ """
+ return text.encode("ascii", "ignore").decode("utf-8")
+
+ def _expand_dollars(self, m: str) -> str:
+ """
+ This method is used to expand numerical dollar values into spoken words.
+ """
+ match = m.group(1)
+ parts = match.split(".")
+ if len(parts) > 2:
+ return match + " dollars" # Unexpected format
+
+ dollars = int(parts[0]) if parts[0] else 0
+ cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
+ if dollars and cents:
+ dollar_unit = "dollar" if dollars == 1 else "dollars"
+ cent_unit = "cent" if cents == 1 else "cents"
+ return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit)
+ elif dollars:
+ dollar_unit = "dollar" if dollars == 1 else "dollars"
+ return "%s %s" % (dollars, dollar_unit)
+ elif cents:
+ cent_unit = "cent" if cents == 1 else "cents"
+ return "%s %s" % (cents, cent_unit)
+ else:
+ return "zero dollars"
+
+ def _remove_commas(self, m: str) -> str:
+ """
+ This method is used to remove commas from sentences.
+ """
+ return m.group(1).replace(",", "")
+
+ def _expand_decimal_point(self, m: str) -> str:
+ """
+ This method is used to expand '.' into spoken word ' point '.
+ """
+ return m.group(1).replace(".", " point ")
+
+ def _expand_ordinal(self, num: str) -> str:
+ """
+ This method is used to expand ordinals such as '1st', '2nd' into spoken words.
+ """
+ ordinal_suffixes = {1: "st", 2: "nd", 3: "rd"}
+
+ num = int(num.group(0)[:-2])
+ if 10 <= num % 100 <= 20:
+ suffix = "th"
+ else:
+ suffix = ordinal_suffixes.get(num % 10, "th")
+ return self.number_to_words(num) + suffix
+
+ def _expand_number(self, m: str) -> str:
+ """
+ This method acts as a preprocessing step for numbers between 1000 and 3000 (same as the original repository:
+ https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/utils/tokenizer.py#L86)
+ """
+ num = int(m.group(0))
+
+ if num > 1000 and num < 3000:
+ if num == 2000:
+ return "two thousand"
+ elif num > 2000 and num < 2010:
+ return "two thousand " + self.number_to_words(num % 100)
+ elif num % 100 == 0:
+ return self.number_to_words(num // 100) + " hundred"
+ else:
+ return self.number_to_words(num)
+ else:
+ return self.number_to_words(num)
+
+ def normalize_numbers(self, text: str) -> str:
+ """
+ This method is used to normalize numbers within a text such as converting the numbers to words, removing
+ commas, etc.
+ """
+ text = re.sub(re.compile(r"([0-9][0-9\,]+[0-9])"), self._remove_commas, text)
+ text = re.sub(re.compile(r"£([0-9\,]*[0-9]+)"), r"\1 pounds", text)
+ text = re.sub(re.compile(r"\$([0-9\.\,]*[0-9]+)"), self._expand_dollars, text)
+ text = re.sub(re.compile(r"([0-9]+\.[0-9]+)"), self._expand_decimal_point, text)
+ text = re.sub(re.compile(r"[0-9]+(st|nd|rd|th)"), self._expand_ordinal, text)
+ text = re.sub(re.compile(r"[0-9]+"), self._expand_number, text)
+ return text
+
+ def expand_abbreviations(self, text: str) -> str:
+ """
+ Expands the abbreviate words.
+ """
+ for regex, replacement in self._abbreviations:
+ text = re.sub(regex, replacement, text)
+ return text
+
+ def collapse_whitespace(self, text: str) -> str:
+ """
+ Collapses runs of whitespace into a single space.
+ """
+ return re.sub(re.compile(r"\s+"), " ", text)
+
+ def __call__(self, text):
+ """
+ Converts text to ASCII, spells out numbers and number-like quantities, expands abbreviations, and
+ collapses whitespace.
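+
+ Example (illustrative):
+
+ ```python
+ >>> normalizer = EnglishNormalizer()
+ >>> normalizer("Dr. Smith paid $5 in 1,000 cities")
+ 'doctor smith paid five dollars in one thousand cities'
+ ```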
+ """
+
+ text = self.convert_to_ascii(text)
+ text = text.lower()
+ text = self.normalize_numbers(text)
+ text = self.expand_abbreviations(text)
+ text = self.collapse_whitespace(text)
+ text = text.replace('"', "")
+
+ return text
diff --git a/janus/lib/python3.10/site-packages/transformers/models/clvp/processing_clvp.py b/janus/lib/python3.10/site-packages/transformers/models/clvp/processing_clvp.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f4d54f259032f88880b904f3d48ec6f058914c3
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/clvp/processing_clvp.py
@@ -0,0 +1,93 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Processor class for CLVP
+"""
+
+from ...processing_utils import ProcessorMixin
+
+
+class ClvpProcessor(ProcessorMixin):
+ r"""
+ Constructs a CLVP processor which wraps a CLVP Feature Extractor and a CLVP Tokenizer into a single processor.
+
+ [`ClvpProcessor`] offers all the functionalities of [`ClvpFeatureExtractor`] and [`ClvpTokenizer`]. See the
+ [`~ClvpProcessor.__call__`], [`~ClvpProcessor.decode`] and [`~ClvpProcessor.batch_decode`] for more information.
+
+ Args:
+ feature_extractor (`ClvpFeatureExtractor`):
+ An instance of [`ClvpFeatureExtractor`]. The feature extractor is a required input.
+ tokenizer (`ClvpTokenizer`):
+ An instance of [`ClvpTokenizer`]. The tokenizer is a required input.
+ """
+
+ feature_extractor_class = "ClvpFeatureExtractor"
+ tokenizer_class = "ClvpTokenizer"
+ model_input_names = [
+ "input_ids",
+ "input_features",
+ "attention_mask",
+ ]
+
+ def __init__(self, feature_extractor, tokenizer):
+ super().__init__(feature_extractor, tokenizer)
+
+ def __call__(self, *args, **kwargs):
+ """
+ Forwards the `raw_speech` and `sampling_rate` arguments to [`~ClvpFeatureExtractor.__call__`] and the `text`
+ argument to [`~ClvpTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
+ information.
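+
+ Example (a minimal sketch; the checkpoint name is illustrative, and the sampling rate is assumed to match the
+ feature extractor's configuration):
+
+ ```python
+ >>> import numpy as np
+ >>> from transformers import ClvpProcessor
+
+ >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")  # assumed checkpoint
+ >>> audio = np.random.randn(22050).astype(np.float32)  # one second of (random) audio
+ >>> inputs = processor(raw_speech=audio, sampling_rate=22050, text="Hello", return_tensors="pt")
+ ```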
+ """
+
+ raw_speech = kwargs.pop("raw_speech", None)
+ sampling_rate = kwargs.pop("sampling_rate", None)
+ text = kwargs.pop("text", None)
+
+ if raw_speech is None and text is None:
+ raise ValueError("You need to specify either an `raw_speech` or `text` input to process.")
+
+ if raw_speech is not None:
+ inputs = self.feature_extractor(raw_speech, sampling_rate=sampling_rate, **kwargs)
+ if text is not None:
+ encodings = self.tokenizer(text, **kwargs)
+
+ if text is None:
+ return inputs
+ elif raw_speech is None:
+ return encodings
+ else:
+ inputs["input_ids"] = encodings["input_ids"]
+ inputs["attention_mask"] = encodings["attention_mask"]
+ return inputs
+
+ # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.batch_decode with Whisper->Clvp
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.decode with Whisper->Clvp
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+
+__all__ = ["ClvpProcessor"]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..057ff87372be2cfafc53ee005327d910a6365351
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py b/janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd9d181cdf35c19e40858b866f48a2280ab7d59c
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py
@@ -0,0 +1,946 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_dinov2_with_registers.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2024 Meta Inc. and the HuggingFace Inc. team. All rights reserved.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections.abc
+import math
+from typing import Dict, List, Optional, Set, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+ torch_int,
+)
+from ...utils.backbone_utils import BackboneMixin
+from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig
+
+
+logger = logging.get_logger(__name__)
+
+# Base docstring
+_CHECKPOINT_FOR_DOC = "facebook/dinov2_with_registers-base"
+
+# General docstring
+_CONFIG_FOR_DOC = "Dinov2WithRegistersConfig"
+
+
+class Dinov2WithRegistersPatchEmbeddings(nn.Module):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels, hidden_size = config.num_channels, config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.num_patches = num_patches
+
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
+ num_channels = pixel_values.shape[1]
+ if num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ f" Expected {self.num_channels} but got {num_channels}."
+ )
+ embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
+ return embeddings
+
+
+class Dinov2WithRegistersEmbeddings(nn.Module):
+ """
+ Construct the CLS token, mask token, register tokens, position and patch embeddings.
+ """
+
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
+ super().__init__()
+
+ self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
+ self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
+ self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size))
+ self.patch_embeddings = Dinov2WithRegistersPatchEmbeddings(config)
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.patch_size = config.patch_size
+ self.config = config
+
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """
+ This method allows interpolating the pre-trained position encodings so that the model can be used on
+ higher-resolution images. This implementation supports torch.jit tracing while maintaining backwards
+ compatibility with the original implementation.
+
+ Adapted from:
+ - https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
+ - https://github.com/facebookresearch/dinov2/blob/main/dinov2/models/vision_transformer.py
+ """
+ num_patches = embeddings.shape[1] - 1
+ num_positions = self.position_embeddings.shape[1] - 1
+
+ # Skip interpolation for matching dimensions (unless tracing)
+ if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
+ return self.position_embeddings
+
+ # Handle class token and patch embeddings separately
+ class_pos_embed = self.position_embeddings[:, 0]
+ patch_pos_embed = self.position_embeddings[:, 1:]
+ dim = embeddings.shape[-1]
+
+ # Calculate new dimensions
+ height = height // self.config.patch_size
+ width = width // self.config.patch_size
+
+ # Reshape for interpolation
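+ # the pre-trained position embeddings are assumed to form a square grid, hence the sqrt below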
+ sqrt_num_positions = torch_int(num_positions**0.5)
+ patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
+
+ # Store original dtype for restoration after interpolation
+ target_dtype = patch_pos_embed.dtype
+
+ # Interpolate at float32 precision
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed.to(dtype=torch.float32),
+ size=(torch_int(height), torch_int(width)), # Explicit size instead of scale_factor
+ mode="bicubic",
+ align_corners=False,
+ antialias=True,
+ ).to(dtype=target_dtype)
+
+ # Validate output dimensions if not tracing
+ if not torch.jit.is_tracing():
+ if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
+ raise ValueError("Width or height does not match with the interpolated position embeddings")
+
+ # Reshape back to original format
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+
+ # Combine class and patch embeddings
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
+
+ def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
+ batch_size, _, height, width = pixel_values.shape
+ target_dtype = self.patch_embeddings.projection.weight.dtype
+ embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
+
+ if bool_masked_pos is not None:
+ embeddings = torch.where(
+ bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
+ )
+
+ # add the [CLS] token to the embedded patch tokens
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
+
+ # add positional encoding to each token
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+
+ # add register tokens between the [CLS] token and the patch tokens
+ embeddings = torch.cat(
+ (embeddings[:, :1], self.register_tokens.expand(embeddings.shape[0], -1, -1), embeddings[:, 1:]), dim=1
+ )
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
+
+class Dinov2WithRegistersSelfAttention(nn.Module):
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}."
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+class Dinov2WithRegistersSdpaSelfAttention(Dinov2WithRegistersSelfAttention):
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
+ super().__init__(config)
+ self.attention_probs_dropout_prob = config.attention_probs_dropout_prob
+
+ def forward(
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ if output_attentions:
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
+ logger.warning_once(
+ "Dinov2WithRegistersModel is using Dinov2WithRegistersSdpaSelfAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states=hidden_states, head_mask=head_mask, output_attentions=output_attentions
+ )
+
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ context_layer = torch.nn.functional.scaled_dot_product_attention(
+ query_layer,
+ key_layer,
+ value_layer,
+ head_mask,
+ self.attention_probs_dropout_prob if self.training else 0.0,
+ is_causal=False,
+ scale=None,
+ )
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ return context_layer, None
+
+
+class Dinov2WithRegistersSelfOutput(nn.Module):
+ """
+ The residual connection is defined in Dinov2WithRegistersLayer instead of here (as is the case with other models), due to the
+ layernorm applied before each block.
+ """
+
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+class Dinov2WithRegistersAttention(nn.Module):
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
+ super().__init__()
+ self.attention = Dinov2WithRegistersSelfAttention(config)
+ self.output = Dinov2WithRegistersSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads: Set[int]) -> None:
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.attention.query = prune_linear_layer(self.attention.query, index)
+ self.attention.key = prune_linear_layer(self.attention.key, index)
+ self.attention.value = prune_linear_layer(self.attention.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
+
+ attention_output = self.output(self_outputs[0], hidden_states)
+
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+class Dinov2WithRegistersSdpaAttention(Dinov2WithRegistersAttention):
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
+ super().__init__(config)
+ self.attention = Dinov2WithRegistersSdpaSelfAttention(config)
+
+
+class Dinov2WithRegistersLayerScale(nn.Module):
+ def __init__(self, config) -> None:
+ super().__init__()
+ self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ return hidden_state * self.lambda1
+
+
+def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
+ argument.
+ """
+ if drop_prob == 0.0 or not training:
+ return input
+ keep_prob = 1 - drop_prob
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
+ random_tensor.floor_() # binarize
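+ # scale the surviving paths by 1/keep_prob so the expected activation is unchanged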
+ output = input.div(keep_prob) * random_tensor
+ return output
+
+
+class Dinov2WithRegistersDropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
+
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ return drop_path(hidden_states, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return "p={}".format(self.drop_prob)
+
+
+class Dinov2WithRegistersMLP(nn.Module):
+ def __init__(self, config) -> None:
+ super().__init__()
+ in_features = out_features = config.hidden_size
+ hidden_features = int(config.hidden_size * config.mlp_ratio)
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
+ if isinstance(config.hidden_act, str):
+ self.activation = ACT2FN[config.hidden_act]
+ else:
+ self.activation = config.hidden_act
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ hidden_state = self.fc1(hidden_state)
+ hidden_state = self.activation(hidden_state)
+ hidden_state = self.fc2(hidden_state)
+ return hidden_state
+
+
+class Dinov2WithRegistersSwiGLUFFN(nn.Module):
+ def __init__(self, config) -> None:
+ super().__init__()
+ in_features = out_features = config.hidden_size
+ hidden_features = int(config.hidden_size * config.mlp_ratio)
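+ # SwiGLU splits the first projection in two, so use 2/3 of the usual MLP width, rounded up to a multiple of 8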
+ hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
+
+ self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True)
+ self.weights_out = nn.Linear(hidden_features, out_features, bias=True)
+
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
+ hidden_state = self.weights_in(hidden_state)
+ x1, x2 = hidden_state.chunk(2, dim=-1)
+ hidden = nn.functional.silu(x1) * x2
+ return self.weights_out(hidden)
+
+
+DINOV2_WITH_REGISTERS_ATTENTION_CLASSES = {
+ "eager": Dinov2WithRegistersAttention,
+ "sdpa": Dinov2WithRegistersSdpaAttention,
+}
+
+
+class Dinov2WithRegistersLayer(nn.Module):
+ """This corresponds to the Block class in the original implementation."""
+
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
+ super().__init__()
+
+ self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.attention = DINOV2_WITH_REGISTERS_ATTENTION_CLASSES[config._attn_implementation](config)
+ self.layer_scale1 = Dinov2WithRegistersLayerScale(config)
+ self.drop_path = (
+ Dinov2WithRegistersDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
+ )
+
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ if config.use_swiglu_ffn:
+ self.mlp = Dinov2WithRegistersSwiGLUFFN(config)
+ else:
+ self.mlp = Dinov2WithRegistersMLP(config)
+ self.layer_scale2 = Dinov2WithRegistersLayerScale(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
+ self_attention_outputs = self.attention(
+ self.norm1(hidden_states), # in Dinov2WithRegisters, layernorm is applied before self-attention
+ head_mask,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+
+ attention_output = self.layer_scale1(attention_output)
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ # first residual connection
+ hidden_states = self.drop_path(attention_output) + hidden_states
+
+ # in Dinov2WithRegisters, layernorm is also applied after self-attention
+ layer_output = self.norm2(hidden_states)
+ layer_output = self.mlp(layer_output)
+ layer_output = self.layer_scale2(layer_output)
+
+ # second residual connection
+ layer_output = self.drop_path(layer_output) + hidden_states
+
+ outputs = (layer_output,) + outputs
+
+ return outputs
+
+
+class Dinov2WithRegistersEncoder(nn.Module):
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([Dinov2WithRegistersLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ) -> Union[tuple, BaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ layer_head_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class Dinov2WithRegistersPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = Dinov2WithRegistersConfig
+ base_model_prefix = "dinov2_with_registers"
+ main_input_name = "pixel_values"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["Dinov2WithRegistersSwiGLUFFN"]
+ _supports_sdpa = True
+
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
+ """Initialize the weights"""
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
+ # `trunc_normal_cpu` not implemented in `half` issues
+ module.weight.data = nn.init.trunc_normal_(
+ module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
+ ).to(module.weight.dtype)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, Dinov2WithRegistersEmbeddings):
+ module.position_embeddings.data = nn.init.trunc_normal_(
+ module.position_embeddings.data.to(torch.float32),
+ mean=0.0,
+ std=self.config.initializer_range,
+ ).to(module.position_embeddings.dtype)
+
+ module.cls_token.data = nn.init.trunc_normal_(
+ module.cls_token.data.to(torch.float32),
+ mean=0.0,
+ std=self.config.initializer_range,
+ ).to(module.cls_token.dtype)
+
+
+_EXPECTED_OUTPUT_SHAPE = [1, 257, 768]
+
+
+DINOV2_WITH_REGISTERS_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`Dinov2WithRegistersConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DINOV2_WITH_REGISTERS_BASE_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`BitImageProcessor.preprocess`] for details.
+
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
+ pre-training.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Dinov2WithRegisters Model transformer outputting raw hidden-states without any specific head on top.",
+ DINOV2_WITH_REGISTERS_START_DOCSTRING,
+)
+class Dinov2WithRegistersModel(Dinov2WithRegistersPreTrainedModel):
+ def __init__(self, config: Dinov2WithRegistersConfig):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = Dinov2WithRegistersEmbeddings(config)
+ self.encoder = Dinov2WithRegistersEncoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
+ return self.embeddings.patch_embeddings
+
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
+ """
+ Prunes heads of the model. `heads_to_prune` is a dict of {layer_num: list of heads to prune in this layer}.
+ See the base class `PreTrainedModel` for details.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(DINOV2_WITH_REGISTERS_BASE_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ modality="vision",
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ bool_masked_pos: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.layernorm(sequence_output)
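+ # pool by taking the final hidden state of the [CLS] token (position 0)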
+ pooled_output = sequence_output[:, 0, :]
+
+ if not return_dict:
+ head_outputs = (sequence_output, pooled_output)
+ return head_outputs + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+# Image classification docstring
+_IMAGE_CLASS_CHECKPOINT = "facebook/dinov2_with_registers-small-imagenet1k-1-layer"
+_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
+
+DINOV2_WITH_REGISTERS_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`BitImageProcessor.preprocess`] for details.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ """
+ Dinov2WithRegisters Model transformer with an image classification head on top (a linear layer on top of the final hidden state
+ of the [CLS] token) e.g. for ImageNet.
+ """,
+ DINOV2_WITH_REGISTERS_START_DOCSTRING,
+)
+class Dinov2WithRegistersForImageClassification(Dinov2WithRegistersPreTrainedModel):
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.dinov2_with_registers = Dinov2WithRegistersModel(config)
+
+ # Classifier head
+ self.classifier = (
+ nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity()
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(DINOV2_WITH_REGISTERS_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
+ output_type=ImageClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+ )
+ def forward(
+ self,
+ pixel_values: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, ImageClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.dinov2_with_registers(
+ pixel_values,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0] # batch_size, sequence_length, hidden_size
+
+ cls_token = sequence_output[:, 0]
+ patch_tokens = sequence_output[:, 1:]
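+ # note: positions 1: also contain the register tokens, so they are included in the mean below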
+
+ linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
+
+ logits = self.classifier(linear_input)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return ImageClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Dinov2WithRegisters backbone, to be used with frameworks like DETR and MaskFormer.
+ """,
+ DINOV2_WITH_REGISTERS_START_DOCSTRING,
+)
+class Dinov2WithRegistersBackbone(Dinov2WithRegistersPreTrainedModel, BackboneMixin):
+ def __init__(self, config):
+ super().__init__(config)
+ super()._init_backbone(config)
+ self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
+ self.embeddings = Dinov2WithRegistersEmbeddings(config)
+ self.encoder = Dinov2WithRegistersEncoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.num_register_tokens = config.num_register_tokens
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
+ return self.embeddings.patch_embeddings
+
+ @add_start_docstrings_to_model_forward(DINOV2_WITH_REGISTERS_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ pixel_values: torch.Tensor,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> BackboneOutput:
+ """
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, AutoBackbone
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
+ >>> model = AutoBackbone.from_pretrained(
+ ... "facebook/dinov2-with-registers-base", out_features=["stage2", "stage5", "stage8", "stage11"]
+ ... )
+
+ >>> inputs = processor(image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> feature_maps = outputs.feature_maps
+ >>> list(feature_maps[-1].shape)
+ [1, 768, 16, 16]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+
+ embedding_output = self.embeddings(pixel_values)
+
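+ # hidden states of all layers are always requested so the backbone can select its output stages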
+ outputs = self.encoder(
+ embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
+ )
+
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+ feature_maps = ()
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
+ if stage in self.out_features:
+ if self.config.apply_layernorm:
+ hidden_state = self.layernorm(hidden_state)
+ if self.config.reshape_hidden_states:
+ hidden_state = hidden_state[:, self.num_register_tokens + 1 :]
+ # this was actually a bug in the original implementation that we copied here,
+ # because normally the order is height, width
+ batch_size, _, height, width = pixel_values.shape
+ patch_size = self.config.patch_size
+ hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
+ hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
+ feature_maps += (hidden_state,)
+
+ if not return_dict:
+ if output_hidden_states:
+ output = (feature_maps,) + outputs[1:]
+ else:
+ output = (feature_maps,) + outputs[2:]
+ return output
+
+ return BackboneOutput(
+ feature_maps=feature_maps,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ attentions=outputs.attentions if output_attentions else None,
+ )
+
+
+__all__ = [
+ "Dinov2WithRegistersPreTrainedModel",
+ "Dinov2WithRegistersModel",
+ "Dinov2WithRegistersForImageClassification",
+ "Dinov2WithRegistersBackbone",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py b/janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbd316c421b0369d86202e15c2f6c6c9e6175de7
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py
@@ -0,0 +1,391 @@
+# coding=utf-8
+# Copyright 2024 Meta Inc. and the HuggingFace Inc. team. All rights reserved.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from transformers.models.dinov2.modeling_dinov2 import (
+ Dinov2Backbone,
+ Dinov2Encoder,
+ Dinov2ForImageClassification,
+ Dinov2Model,
+ Dinov2PatchEmbeddings,
+ Dinov2PreTrainedModel,
+)
+from ...configuration_utils import PretrainedConfig
+from ...modeling_outputs import BackboneOutput
+from ...utils import logging, torch_int
+from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+logger = logging.get_logger(__name__)
+
+
+class Dinov2WithRegistersConfig(BackboneConfigMixin, PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Dinov2WithRegistersModel`]. It is used to instantiate an
+ Dinov2WithRegisters model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the DINOv2 with Registers
+ [facebook/dinov2-with-registers-base](https://huggingface.co/facebook/dinov2-with-registers-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ mlp_ratio (`int`, *optional*, defaults to 4):
+ Ratio of the hidden size of the MLPs relative to the `hidden_size`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the layer normalization layers.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of input channels.
+ qkv_bias (`bool`, *optional*, defaults to `True`):
+ Whether to add a bias to the queries, keys and values.
+ layerscale_value (`float`, *optional*, defaults to 1.0):
+ Initial value to use for layer scale.
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
+ Stochastic depth rate per sample (when applied in the main path of residual layers).
+ use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
+ Whether to use the SwiGLU feedforward neural network.
+ num_register_tokens (`int`, *optional*, defaults to 4):
+ Number of register tokens to use.
+ out_features (`List[str]`, *optional*):
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ out_indices (`List[int]`, *optional*):
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
+ same order as defined in the `stage_names` attribute.
+ apply_layernorm (`bool`, *optional*, defaults to `True`):
+ Whether to apply layer normalization to the feature maps in case the model is used as backbone.
+ reshape_hidden_states (`bool`, *optional*, defaults to `True`):
+ Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
+ case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
+ seq_len, hidden_size)`.
+
+ Example:
+
+ ```python
+ >>> from transformers import Dinov2WithRegistersConfig, Dinov2WithRegistersModel
+
+ >>> # Initializing a Dinov2WithRegisters base style configuration
+ >>> configuration = Dinov2WithRegistersConfig()
+
+ >>> # Initializing a model (with random weights) from the base style configuration
+ >>> model = Dinov2WithRegistersModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "dinov2_with_registers"
+
+ def __init__(
+ self,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ mlp_ratio=4,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.0,
+ attention_probs_dropout_prob=0.0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-6,
+ image_size=224,
+ patch_size=16,
+ num_channels=3,
+ qkv_bias=True,
+ layerscale_value=1.0,
+ drop_path_rate=0.0,
+ use_swiglu_ffn=False,
+ num_register_tokens=4,
+ out_features=None,
+ out_indices=None,
+ apply_layernorm=True,
+ reshape_hidden_states=True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.mlp_ratio = mlp_ratio
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.qkv_bias = qkv_bias
+ self.layerscale_value = layerscale_value
+ self.drop_path_rate = drop_path_rate
+ self.use_swiglu_ffn = use_swiglu_ffn
+ self.num_register_tokens = num_register_tokens
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)]
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+ )
+ self.apply_layernorm = apply_layernorm
+ self.reshape_hidden_states = reshape_hidden_states
+
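+# A minimal sketch (relying only on the defaults defined above) of how
+# `out_indices` is aligned with `stage_names` by
+# `get_aligned_output_features_output_indices` when this config drives a backbone:
+#
+#     config = Dinov2WithRegistersConfig(out_indices=[2, 5, 8, 11])
+#     config.out_features  # ["stage2", "stage5", "stage8", "stage11"]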
+
+class Dinov2WithRegistersPatchEmbeddings(Dinov2PatchEmbeddings):
+ pass
+
+
+class Dinov2WithRegistersEmbeddings(nn.Module):
+ """
+ Construct the CLS token, mask token, register tokens, position and patch embeddings.
+ """
+
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
+ super().__init__()
+
+ self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
+ self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
+ self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size))
+ self.patch_embeddings = Dinov2WithRegistersPatchEmbeddings(config)
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.patch_size = config.patch_size
+ self.config = config
+
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """
+ This method interpolates the pre-trained position encodings so that the model can be used on higher
+ resolution images. This implementation supports torch.jit tracing while maintaining backwards compatibility
+ with the original implementation.
+
+ Adapted from:
+ - https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
+ - https://github.com/facebookresearch/dinov2/blob/main/dinov2/models/vision_transformer.py
+ """
+ num_patches = embeddings.shape[1] - 1
+ num_positions = self.position_embeddings.shape[1] - 1
+
+ # Skip interpolation for matching dimensions (unless tracing)
+ if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
+ return self.position_embeddings
+
+ # Handle class token and patch embeddings separately
+ class_pos_embed = self.position_embeddings[:, 0]
+ patch_pos_embed = self.position_embeddings[:, 1:]
+ dim = embeddings.shape[-1]
+
+ # Calculate new dimensions
+ height = height // self.config.patch_size
+ width = width // self.config.patch_size
+
+ # Reshape for interpolation
+ sqrt_num_positions = torch_int(num_positions**0.5)
+ patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
+
+ # Store original dtype for restoration after interpolation
+ target_dtype = patch_pos_embed.dtype
+
+ # Interpolate at float32 precision
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed.to(dtype=torch.float32),
+ size=(torch_int(height), torch_int(width)), # Explicit size instead of scale_factor
+ mode="bicubic",
+ align_corners=False,
+ antialias=True,
+ ).to(dtype=target_dtype)
+
+ # Validate output dimensions if not tracing
+ if not torch.jit.is_tracing():
+ if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
+ raise ValueError("Width or height does not match with the interpolated position embeddings")
+
+ # Reshape back to original format
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+
+ # Combine class and patch embeddings
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
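+
+ # Shape walk-through for the interpolation above (a sketch, assuming the usual
+ # square pretraining grid): the patch part of `position_embeddings` goes from
+ # (1, num_positions, dim) to (1, sqrt(N), sqrt(N), dim), is permuted to channels
+ # first, bicubically resized to the new (height // patch_size, width // patch_size)
+ # grid, and flattened back to (1, new_num_patches, dim) before being concatenated
+ # with the class position embedding again.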
+
+ def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
+ batch_size, _, height, width = pixel_values.shape
+ target_dtype = self.patch_embeddings.projection.weight.dtype
+ embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
+
+ if bool_masked_pos is not None:
+ embeddings = torch.where(
+ bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
+ )
+
+ # add the [CLS] token to the embedded patch tokens
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
+
+ # add positional encoding to each token
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+
+ # add register tokens
+ embeddings = torch.cat(
+ (embeddings[:, :1], self.register_tokens.expand(embeddings.shape[0], -1, -1), embeddings[:, 1:]), dim=1
+ )
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
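+
+ # Resulting token layout, sketched with this file's defaults (image_size=224,
+ # patch_size=16, num_register_tokens=4): 1 [CLS] token, then 4 register tokens,
+ # then (224 // 16) ** 2 = 196 patch tokens, i.e. 201 embeddings of size hidden_size.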
+
+
+class Dinov2WithRegistersEncoder(Dinov2Encoder):
+ pass
+
+
+class Dinov2WithRegistersPreTrainedModel(Dinov2PreTrainedModel):
+ pass
+
+
+class Dinov2WithRegistersModel(Dinov2Model):
+ pass
+
+
+class Dinov2WithRegistersForImageClassification(Dinov2ForImageClassification):
+ pass
+
+
+class Dinov2WithRegistersBackbone(Dinov2Backbone):
+ def __init__(self, config):
+ super().__init__(config)
+ super()._init_backbone(config)
+
+ self.num_register_tokens = config.num_register_tokens
+ self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
+ self.embeddings = Dinov2WithRegistersEmbeddings(config)
+ self.encoder = Dinov2WithRegistersEncoder(config)
+
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
+ return self.embeddings.patch_embeddings
+
+ def forward(
+ self,
+ pixel_values: torch.Tensor,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> BackboneOutput:
+ """
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoImageProcessor, AutoBackbone
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
+ >>> model = AutoBackbone.from_pretrained(
+ ... "facebook/dinov2-with-registers-base", out_features=["stage2", "stage5", "stage8", "stage11"]
+ ... )
+
+ >>> inputs = processor(image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> feature_maps = outputs.feature_maps
+ >>> list(feature_maps[-1].shape)
+ [1, 768, 16, 16]
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+
+ embedding_output = self.embeddings(pixel_values)
+
+ outputs = self.encoder(
+ embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
+ )
+
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
+
+ feature_maps = ()
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
+ if stage in self.out_features:
+ if self.config.apply_layernorm:
+ hidden_state = self.layernorm(hidden_state)
+ if self.config.reshape_hidden_states:
+ hidden_state = hidden_state[:, self.num_register_tokens + 1 :]
+ # this reproduces a bug in the original implementation that we copied here,
+ # since the order is normally height, width
+ batch_size, _, height, width = pixel_values.shape
+ patch_size = self.config.patch_size
+ hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
+ hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
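+ # after dropping the CLS and register tokens above, each feature map is
+ # (batch_size, hidden_size, height // patch_size, width // patch_size)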
+ feature_maps += (hidden_state,)
+
+ if not return_dict:
+ if output_hidden_states:
+ output = (feature_maps,) + outputs[1:]
+ else:
+ output = (feature_maps,) + outputs[2:]
+ return output
+
+ return BackboneOutput(
+ feature_maps=feature_maps,
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
+ attentions=outputs.attentions if output_attentions else None,
+ )
+
+
+__all__ = [
+ "Dinov2WithRegistersConfig",
+ "Dinov2WithRegistersPreTrainedModel",
+ "Dinov2WithRegistersModel",
+ "Dinov2WithRegistersForImageClassification",
+ "Dinov2WithRegistersBackbone",
+]
diff --git a/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9d1305fcd7be3b75ae57b4d4f8813b1e5c39818e
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/__init__.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/configuration_instructblipvideo.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/configuration_instructblipvideo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26eaa4f0f1f769092c846e2691b00d2d16211b6a
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/configuration_instructblipvideo.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/image_processing_instructblipvideo.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/image_processing_instructblipvideo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c36a647e0278256197174e4cc17fe557565e6d04
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/image_processing_instructblipvideo.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/modeling_instructblipvideo.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/modeling_instructblipvideo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79080b2bfd3e6b51bdce5764836135a035617e9d
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/modeling_instructblipvideo.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/modular_instructblipvideo.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/modular_instructblipvideo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59c4ba70cbcd8b597ceb113664009610d7f539ba
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/modular_instructblipvideo.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/processing_instructblipvideo.cpython-310.pyc b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/processing_instructblipvideo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..338c350432a29d35510ba8aad0591fa86c8aee1e
Binary files /dev/null and b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/processing_instructblipvideo.cpython-310.pyc differ
diff --git a/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/image_processing_instructblipvideo.py b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/image_processing_instructblipvideo.py
new file mode 100644
index 0000000000000000000000000000000000000000..75e07317b05f62108333415d32c286dbd0a31be0
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/image_processing_instructblipvideo.py
@@ -0,0 +1,348 @@
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Image processor class for InstructBLIPVideo. Largely a copy of Blip2Processor, with the addition of video processing abilities.
+"""
+
+from typing import Dict, List, Optional, Union
+
+import numpy as np
+
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
+from ...image_utils import (
+ OPENAI_CLIP_MEAN,
+ OPENAI_CLIP_STD,
+ ChannelDimension,
+ ImageInput,
+ PILImageResampling,
+ VideoInput,
+ infer_channel_dimension_format,
+ is_scaled_image,
+ is_valid_image,
+ to_numpy_array,
+ valid_images,
+ validate_preprocess_arguments,
+)
+from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
+
+
+if is_vision_available():
+ import PIL
+
+
+logger = logging.get_logger(__name__)
+
+
+def make_batched_videos(videos) -> List[VideoInput]:
+ if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
+ return videos
+
+ elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
+ if isinstance(videos[0], PIL.Image.Image):
+ return [videos]
+ elif len(videos[0].shape) == 4:
+ return [list(video) for video in videos]
+
+ elif is_valid_image(videos):
+ if isinstance(videos, PIL.Image.Image):
+ return [[videos]]
+ elif len(videos.shape) == 4:
+ return [list(videos)]
+
+ raise ValueError(f"Could not make batched video from {videos}")
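+
+# A sketch of the normalization performed above, assuming PIL frames f1, f2, f3:
+#
+#     make_batched_videos(f1)                -> [[f1]]              # 1 video, 1 frame
+#     make_batched_videos([f1, f2])          -> [[f1, f2]]          # 1 video
+#     make_batched_videos([[f1, f2], [f3]])  -> [[f1, f2], [f3]]    # 2 videos
+#
+# 4D numpy/torch inputs are split along their first axis into lists of frames.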
+
+
+# Copied from transformers.models.blip.image_processing_blip.BlipImageProcessor with Blip->InstructBlipVideo, BLIP->InstructBLIPVideo
+class InstructBlipVideoImageProcessor(BaseImageProcessor):
+ r"""
+ Constructs an InstructBLIPVideo image processor.
+
+ Args:
+ do_resize (`bool`, *optional*, defaults to `True`):
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
+ `do_resize` parameter in the `preprocess` method.
+ size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
+ Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
+ method.
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
+ Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
+ overridden by the `resample` parameter in the `preprocess` method.
+ do_rescale (`bool`, *optional*, defaults to `True`):
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
+ `do_rescale` parameter in the `preprocess` method.
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
+ Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
+ overridden by the `rescale_factor` parameter in the `preprocess` method.
+ do_normalize (`bool`, *optional*, defaults to `True`):
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
+ method.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
+ image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
+ Whether to convert the image to RGB.
+ """
+
+ model_input_names = ["pixel_values"]
+
+ def __init__(
+ self,
+ do_resize: bool = True,
+ size: Dict[str, int] = None,
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ do_rescale: bool = True,
+ rescale_factor: Union[int, float] = 1 / 255,
+ do_normalize: bool = True,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_convert_rgb: bool = True,
+ **kwargs,
+ ) -> None:
+ super().__init__(**kwargs)
+ size = size if size is not None else {"height": 384, "width": 384}
+ size = get_size_dict(size, default_to_square=True)
+
+ self.do_resize = do_resize
+ self.size = size
+ self.resample = resample
+ self.do_rescale = do_rescale
+ self.rescale_factor = rescale_factor
+ self.do_normalize = do_normalize
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
+ self.do_convert_rgb = do_convert_rgb
+
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
+ def resize(
+ self,
+ image: np.ndarray,
+ size: Dict[str, int],
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
+ data_format: Optional[Union[str, ChannelDimension]] = None,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ **kwargs,
+ ) -> np.ndarray:
+ """
+ Resize an image to `(size["height"], size["width"])`.
+
+ Args:
+ image (`np.ndarray`):
+ Image to resize.
+ size (`Dict[str, int]`):
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
+ data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
+ image is used. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+
+ Returns:
+ `np.ndarray`: The resized image.
+ """
+ size = get_size_dict(size)
+ if "height" not in size or "width" not in size:
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
+
+ output_size = (size["height"], size["width"])
+ return resize(
+ image,
+ size=output_size,
+ resample=resample,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ **kwargs,
+ )
+
+ # Ignore copy
+ @filter_out_non_signature_kwargs()
+ def preprocess(
+ self,
+ images: VideoInput = None,
+ do_resize: Optional[bool] = None,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = None,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[float] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ do_convert_rgb: Optional[bool] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> BatchFeature:
+ """
+ Preprocess a video or a batch of videos.
+
+ Args:
+ images (`VideoInput`):
+ Video frames to preprocess. Expects a single video or a batch of videos as a list of frames with
+ pixel values ranging from 0 to 255. If passing in a video with pixel values between 0 and 1, set
+ `do_rescale=False`.
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
+ Whether to resize the video frames.
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
+ Controls the size of the video frames after `resize`. Each frame is resized to
+ `(size["height"], size["width"])`.
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
+ Resampling filter to use if resizing the video. Only has an effect if `do_resize` is set to `True`.
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
+ Whether to rescale the video values to be between 0 and 1.
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
+ Rescale factor to rescale the video by if `do_rescale` is set to `True`.
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
+ Whether to normalize the video.
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
+ Image mean to normalize the video by if `do_normalize` is set to `True`.
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
+ Image standard deviation to normalize the video by if `do_normalize` is set to `True`.
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
+ Whether to convert the image to RGB.
+ return_tensors (`str` or `TensorType`, *optional*):
+ The type of tensors to return. Can be one of:
+ - Unset: Return a list of `np.ndarray`.
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
+ The channel dimension format for the output image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - Unset: Use the channel dimension format of the input image.
+ input_data_format (`ChannelDimension` or `str`, *optional*):
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
+ from the input image. Can be one of:
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
+ """
+ do_resize = do_resize if do_resize is not None else self.do_resize
+ resample = resample if resample is not None else self.resample
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+ image_mean = image_mean if image_mean is not None else self.image_mean
+ image_std = image_std if image_std is not None else self.image_std
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
+
+ size = size if size is not None else self.size
+ size = get_size_dict(size, default_to_square=False)
+
+ videos = make_batched_videos(images)
+
+ validate_preprocess_arguments(
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ )
+
+ if not valid_images(videos):
+ raise ValueError(
+ "Invalid input type. Must be of type PIL.Image.Image, numpy.ndarray, "
+ "torch.Tensor, tf.Tensor or jax.ndarray."
+ )
+
+ pixel_values = [
+ [
+ self._preprocess_image(
+ image=frame,
+ do_resize=do_resize,
+ size=size,
+ resample=resample,
+ do_rescale=do_rescale,
+ rescale_factor=rescale_factor,
+ do_normalize=do_normalize,
+ image_mean=image_mean,
+ image_std=image_std,
+ do_convert_rgb=do_convert_rgb,
+ data_format=data_format,
+ input_data_format=input_data_format,
+ )
+ for frame in video
+ ]
+ for video in videos
+ ]
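+ # pixel_values is a list (videos) of lists (frames) of arrays in `data_format`;
+ # with return_tensors="pt" or "np", the BatchFeature below stacks it into a
+ # (batch_size, num_frames, num_channels, height, width) tensor.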
+
+ encoded_outputs = BatchFeature(data={"pixel_values": pixel_values}, tensor_type=return_tensors)
+ return encoded_outputs
+
+ # Ignore copy
+ def _preprocess_image(
+ self,
+ image: ImageInput = None,
+ do_resize: Optional[bool] = None,
+ size: Optional[Dict[str, int]] = None,
+ resample: PILImageResampling = None,
+ do_rescale: Optional[bool] = None,
+ rescale_factor: Optional[float] = None,
+ do_normalize: Optional[bool] = None,
+ image_mean: Optional[Union[float, List[float]]] = None,
+ image_std: Optional[Union[float, List[float]]] = None,
+ do_convert_rgb: Optional[bool] = None,
+ data_format: ChannelDimension = ChannelDimension.FIRST,
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
+ ) -> np.ndarray:
+ # PIL RGBA images are converted to RGB
+ if do_convert_rgb:
+ image = convert_to_rgb(image)
+
+ # All transformations expect numpy arrays.
+ image = to_numpy_array(image)
+
+ if do_rescale and is_scaled_image(image):
+ logger.warning_once(
+ "It looks like you are trying to rescale already rescaled video frames. If the input"
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
+ )
+
+ if input_data_format is None:
+ # We assume that all images have the same channel dimension format.
+ input_data_format = infer_channel_dimension_format(image)
+
+ if do_resize:
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
+
+ if do_rescale:
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
+
+ if do_normalize:
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
+
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
+
+ return image
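+
+
+# Minimal usage sketch (an assumption-laden example rather than official docs: it
+# builds the processor directly instead of loading a checkpoint and feeds a random
+# 4-frame clip):
+#
+#     import numpy as np
+#
+#     processor = InstructBlipVideoImageProcessor()
+#     clip = np.random.randint(0, 256, (4, 360, 640, 3), dtype=np.uint8)  # (frames, H, W, C)
+#     out = processor(images=clip, return_tensors="np")
+#     out["pixel_values"].shape  # (1, 4, 3, 384, 384)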
diff --git a/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/processing_instructblipvideo.py b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/processing_instructblipvideo.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d4e59e26b46214abbf1618af5d21fd6b325b95c
--- /dev/null
+++ b/janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/processing_instructblipvideo.py
@@ -0,0 +1,236 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for InstructBLIPVideo. Largely a copy of Blip2Processor, with the addition of a tokenizer for the Q-Former.
+"""
+
+import os
+from typing import List, Optional, Union
+
+from ...image_processing_utils import BatchFeature
+from ...image_utils import VideoInput
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import (
+ AddedToken,
+ BatchEncoding,
+ PaddingStrategy,
+ PreTokenizedInput,
+ TextInput,
+ TruncationStrategy,
+)
+from ...utils import TensorType, logging
+from ..auto import AutoTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+
+class InstructBlipVideoProcessor(ProcessorMixin):
+ r"""
+ Constructs an InstructBLIPVideo processor which wraps an InstructBLIPVideo image processor and a LLaMa/T5 tokenizer into a single
+ processor.
+
+ [`InstructBlipVideoProcessor`] offers all the functionalities of [`InstructBlipVideoImageProcessor`] and [`AutoTokenizer`]. See the
+ docstring of [`~InstructBlipVideoProcessor.__call__`] and [`~InstructBlipVideoProcessor.decode`] for more information.
+
+ Args:
+ image_processor (`InstructBlipVideoImageProcessor`):
+ An instance of [`InstructBlipVideoImageProcessor`]. The image processor is a required input.
+ tokenizer (`AutoTokenizer`):
+ An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
+ qformer_tokenizer (`AutoTokenizer`):
+ An instance of [`PreTrainedTokenizer`]. The Q-Former tokenizer is a required input.
+ num_query_tokens (`int`, *optional*):
+ Number of tokens used by the Q-Former as queries; should be the same as in the model's config.
+ """
+
+ attributes = ["image_processor", "tokenizer", "qformer_tokenizer"]
+ valid_kwargs = ["num_query_tokens"]
+ image_processor_class = "InstructBlipVideoImageProcessor"
+ tokenizer_class = "AutoTokenizer"
+ qformer_tokenizer_class = "AutoTokenizer"
+
+ def __init__(self, image_processor, tokenizer, qformer_tokenizer, num_query_tokens=None, **kwargs):
+ if not hasattr(tokenizer, "video_token"):
+ self.video_token = AddedToken("