| import warnings
|
|
|
| from transformers.configuration_utils import PretrainedConfig
|
|
|
| from .configuration_florence2 import Florence2VisionConfig
|
|
|
|
|
| class Gemma2Config(PretrainedConfig):
|
| r"""
|
| This is the configuration class to store the configuration of a [`Gemma2Model`]. It is used to instantiate a Gemma2
|
| model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
|
| defaults will yield a similar configuration to that of the Gemma2-7B.
|
| e.g. [google/gemma2-7b](https://huggingface.co/google/gemma2-7b)
|
| Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| documentation from [`PretrainedConfig`] for more information.
|
| Args:
|
| vocab_size (`int`, *optional*, defaults to 256000):
|
| Vocabulary size of the Gemma2 model. Defines the number of different tokens that can be represented by the
|
| `inputs_ids` passed when calling [`Gemma2Model`]
|
| hidden_size (`int`, *optional*, defaults to 2304):
|
| Dimension of the hidden representations.
|
| intermediate_size (`int`, *optional*, defaults to 9216):
|
| Dimension of the MLP representations.
|
| num_hidden_layers (`int`, *optional*, defaults to 26):
|
| Number of hidden layers in the Transformer decoder.
|
| num_attention_heads (`int`, *optional*, defaults to 8):
|
| Number of attention heads for each attention layer in the Transformer decoder.
|
| num_key_value_heads (`int`, *optional*, defaults to 4):
|
| This is the number of key_value heads that should be used to implement Grouped Query Attention. If
|
| `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
|
| `num_key_value_heads=1` the model will use Multi Query Attention (MQA); otherwise GQA is used. When
|
| converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
|
| by mean-pooling all the original heads within that group. For more details check out [this
|
| paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
|
| `num_attention_heads`.
|
| head_dim (`int`, *optional*, defaults to 256):
|
| The attention head dimension.
|
| hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
|
| The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
|
| if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
|
| max_position_embeddings (`int`, *optional*, defaults to 8192):
|
| The maximum sequence length that this model might ever be used with.
|
| initializer_range (`float`, *optional*, defaults to 0.02):
|
| The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
| rms_norm_eps (`float`, *optional*, defaults to 1e-06):
|
| The epsilon used by the rms normalization layers.
|
| use_cache (`bool`, *optional*, defaults to `True`):
|
| Whether or not the model should return the last key/values attentions (not used by all models). Only
|
| relevant if `config.is_decoder=True`.
|
| pad_token_id (`int`, *optional*, defaults to 0):
|
| Padding token id.
|
| eos_token_id (`int`, *optional*, defaults to 1):
|
| End of stream token id.
|
| bos_token_id (`int`, *optional*, defaults to 2):
|
| Beginning of stream token id.
|
| tie_word_embeddings (`bool`, *optional*, defaults to `True`):
|
| Whether to tie weight embeddings
|
| rope_theta (`float`, *optional*, defaults to 10000.0):
|
| The base period of the RoPE embeddings.
|
| attention_bias (`bool`, *optional*, defaults to `False`):
|
| Whether to use a bias in the query, key, value and output projection layers during self-attention.
|
| attention_dropout (`float`, *optional*, defaults to 0.0):
|
| The dropout ratio for the attention probabilities.
|
| query_pre_attn_scalar (`float`, *optional*, defaults to 256): scaling factor used on the attention scores
|
| sliding_window (`int`, *optional*, defaults to 4096): in Gemma2, every other layer uses sliding window attention. This is the
|
| size of the sliding window.
|
| final_logit_softcapping (`float`, *optional*, defaults to 30.0): scaling factor when applying tanh softcapping on the logits.
|
| attn_logit_softcapping (`float`, *optional*, defaults to 50.0): scaling factor when applying tanh softcapping on the attention scores.
|
| cache_implementation (`str`, *optional*, defaults to `"hybrid"`): the cache type to be used with `generate`.
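|
| encoder_hidden_size (`int`, *optional*, defaults to 1024):
|
| Dimensionality of the encoder hidden states that the Gemma2 decoder cross-attends to.
|
| encoder_attn_every_x_layers (`int`, *optional*, defaults to 1):
|
| How often (in decoder layers) a cross-attention block over the encoder hidden states is inserted.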
|
|
|
| ```python
|
| >>> from transformers import Gemma2Model, Gemma2Config
|
| >>> # Initializing a Gemma2 gemma2-7b style configuration
|
| >>> configuration = Gemma2Config()
|
| >>> # Initializing a model from the gemma2-7b style configuration
|
| >>> model = Gemma2Model(configuration)
|
| >>> # Accessing the model configuration
|
| >>> configuration = model.config
|
| ```"""
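|
| # A hedged sketch of the head mean-pooling mentioned in the docstring above (an illustration,
| # not conversion code from this repo): when converting a multi-head checkpoint to GQA, each
| # group's key/value head is the mean of the original heads in that group, e.g. for a key
| # projection weight `wk` of shape (num_attention_heads * head_dim, hidden_size):
| #
| #     heads_per_group = num_attention_heads // num_key_value_heads
| #     wk = wk.view(num_attention_heads, head_dim, hidden_size)
| #     wk_gqa = wk.view(num_key_value_heads, heads_per_group, head_dim, hidden_size).mean(dim=1)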
|
|
|
| model_type = "gemma2"
|
| keys_to_ignore_at_inference = ["past_key_values"]
|
|
|
| def __init__(
|
| self,
|
| vocab_size=256000,
|
| hidden_size=2304,
|
| intermediate_size=9216,
|
| num_hidden_layers=26,
|
| num_attention_heads=8,
|
| num_key_value_heads=4,
|
| head_dim=256,
|
| hidden_activation="gelu_pytorch_tanh",
|
| max_position_embeddings=8192,
|
| initializer_range=0.02,
|
| rms_norm_eps=1e-6,
|
| use_cache=True,
|
| pad_token_id=0,
|
| eos_token_id=1,
|
| bos_token_id=2,
|
| tie_word_embeddings=True,
|
| rope_theta=10000.0,
|
| attention_bias=False,
|
| attention_dropout=0.0,
|
| query_pre_attn_scalar=256,
|
| sliding_window=4096,
|
| final_logit_softcapping=30.0,
|
| attn_logit_softcapping=50.0,
|
| cache_implementation="hybrid",
|
|
|
| encoder_hidden_size=1024,
|
| encoder_attn_every_x_layers=1,
|
| **kwargs,
|
| ):
|
| super().__init__(
|
| pad_token_id=pad_token_id,
|
| bos_token_id=bos_token_id,
|
| eos_token_id=eos_token_id,
|
| tie_word_embeddings=tie_word_embeddings,
|
| **kwargs,
|
| )
|
| self.vocab_size = vocab_size
|
| self.max_position_embeddings = max_position_embeddings
|
| self.hidden_size = hidden_size
|
| self.intermediate_size = intermediate_size
|
| self.num_hidden_layers = num_hidden_layers
|
| self.num_attention_heads = num_attention_heads
|
| self.head_dim = head_dim
|
| self.num_key_value_heads = num_key_value_heads
|
| self.initializer_range = initializer_range
|
| self.rms_norm_eps = rms_norm_eps
|
| self.use_cache = use_cache
|
| self.rope_theta = rope_theta
|
| self.attention_bias = attention_bias
|
| self.attention_dropout = attention_dropout
|
| self.hidden_activation = hidden_activation
|
| self.query_pre_attn_scalar = query_pre_attn_scalar
|
| self.sliding_window = sliding_window
|
| self.final_logit_softcapping = final_logit_softcapping
|
| self.attn_logit_softcapping = attn_logit_softcapping
|
| self.cache_implementation = cache_implementation
|
|
|
| self.encoder_hidden_size = encoder_hidden_size
|
| self.encoder_attn_every_x_layers = encoder_attn_every_x_layers
|
|
|
|
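| # Illustrative sketch (an assumption of this documentation, not a checkpoint default):
| # `encoder_hidden_size` is expected to match the hidden size of the encoder whose outputs
| # the Gemma2 decoder cross-attends to, and `encoder_attn_every_x_layers` controls how often
| # that cross-attention is inserted, e.g.:
| #
| #     decoder_config = Gemma2Config(
| #         encoder_hidden_size=1024,       # hidden size of the accompanying encoder
| #         encoder_attn_every_x_layers=1,  # cross-attend in every decoder layer
| #     )
|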
|
| class FlorenceGemma2LanguageConfig(PretrainedConfig):
|
| r"""
|
| This is the configuration class to store the configuration of a [`Florence2LanguagePreTrainedModel`]. It is used to instantiate a BART
|
| model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
|
| defaults will yield a similar configuration to that of the BART
|
| [facebook/bart-large](https://huggingface.co/facebook/bart-large) architecture.
|
|
|
| Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| documentation from [`PretrainedConfig`] for more information.
|
|
|
|
|
| Args:
|
| vocab_size (`int`, *optional*, defaults to 51289):
|
| Vocabulary size of the Florence2Language model. Defines the number of different tokens that can be represented by the
|
| `inputs_ids` passed when calling [`Florence2LanguageModel`].
|
| d_model (`int`, *optional*, defaults to 1024):
|
| Dimensionality of the layers and the pooler layer.
|
| encoder_layers (`int`, *optional*, defaults to 12):
|
| Number of encoder layers.
|
| decoder_layers (`int`, *optional*, defaults to 12):
|
| Number of decoder layers.
|
| encoder_attention_heads (`int`, *optional*, defaults to 16):
|
| Number of attention heads for each attention layer in the Transformer encoder.
|
| decoder_attention_heads (`int`, *optional*, defaults to 16):
|
| Number of attention heads for each attention layer in the Transformer decoder.
|
| decoder_ffn_dim (`int`, *optional*, defaults to 4096):
|
| Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
|
| encoder_ffn_dim (`int`, *optional*, defaults to 4096):
|
| Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
|
| activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
|
| The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
|
| `"relu"`, `"silu"` and `"gelu_new"` are supported.
|
| dropout (`float`, *optional*, defaults to 0.1):
|
| The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
|
| attention_dropout (`float`, *optional*, defaults to 0.0):
|
| The dropout ratio for the attention probabilities.
|
| activation_dropout (`float`, *optional*, defaults to 0.0):
|
| The dropout ratio for activations inside the fully connected layer.
|
| classifier_dropout (`float`, *optional*, defaults to 0.0):
|
| The dropout ratio for classifier.
|
| max_position_embeddings (`int`, *optional*, defaults to 1024):
|
| The maximum sequence length that this model might ever be used with. Typically set this to something large
|
| just in case (e.g., 512 or 1024 or 2048).
|
| init_std (`float`, *optional*, defaults to 0.02):
|
| The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
| encoder_layerdrop (`float`, *optional*, defaults to 0.0):
|
| The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
|
| for more details.
|
| decoder_layerdrop (`float`, *optional*, defaults to 0.0):
|
| The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
|
| for more details.
|
| scale_embedding (`bool`, *optional*, defaults to `False`):
|
| Scale embeddings by dividing by sqrt(d_model).
|
| use_cache (`bool`, *optional*, defaults to `True`):
|
| Whether or not the model should return the last key/values attentions (not used by all models).
|
| num_labels (`int`, *optional*, defaults to 3):
|
| The number of labels to use in [`Florence2LanguageForSequenceClassification`].
|
| forced_eos_token_id (`int`, *optional*, defaults to 2):
|
| The id of the token to force as the last generated token when `max_length` is reached. Usually set to
|
| `eos_token_id`.
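|
| encoder_vocab_size (`int`, *optional*, defaults to 51289):
|
| Vocabulary size used on the encoder side when a separate encoder tokenizer is used.
|
| use_encoder_tokenizer (`bool`, *optional*, defaults to `False`):
|
| Whether the encoder uses its own tokenizer (and therefore its own vocabulary and special token ids).
|
| encoder_pad_token_id (`int`, *optional*, defaults to 1):
|
| Padding token id used on the encoder side.
|
| encoder_bos_token_id (`int`, *optional*, defaults to 0):
|
| Beginning of stream token id used on the encoder side.
|
| encoder_eos_token_id (`int`, *optional*, defaults to 2):
|
| End of stream token id used on the encoder side.
|
| gemma_config (`dict`, *optional*):
|
| Dictionary of keyword arguments used to build the nested [`Gemma2Config`] for the Gemma2 decoder.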
|
|
|
| Example:
|
|
|
| ```python
|
| >>> from transformers import Florence2LanguageConfig, Florence2LanguageModel
|
|
|
| >>> # Initializing a Florence2 Language style configuration
|
| >>> configuration = Florence2LanguageConfig()
|
|
|
| >>> # Initializing a model (with random weights)
|
| >>> model = Florence2LanguageModel(configuration)
|
|
|
| >>> # Accessing the model configuration
|
| >>> configuration = model.config
|
| ```"""
|
|
|
| model_type = "florencegemma2_language"
|
| keys_to_ignore_at_inference = ["past_key_values"]
|
| attribute_map = {
|
| "num_attention_heads": "encoder_attention_heads",
|
| "hidden_size": "d_model",
|
| }
|
|
|
| def __init__(
|
| self,
|
| vocab_size=51289,
|
| max_position_embeddings=1024,
|
| encoder_layers=12,
|
| encoder_ffn_dim=4096,
|
| encoder_attention_heads=16,
|
| decoder_layers=12,
|
| decoder_ffn_dim=4096,
|
| decoder_attention_heads=16,
|
| encoder_layerdrop=0.0,
|
| decoder_layerdrop=0.0,
|
| activation_function="gelu",
|
| d_model=1024,
|
| dropout=0.1,
|
| attention_dropout=0.0,
|
| activation_dropout=0.0,
|
| init_std=0.02,
|
| classifier_dropout=0.0,
|
| scale_embedding=False,
|
| use_cache=True,
|
| num_labels=3,
|
| pad_token_id=1,
|
| bos_token_id=0,
|
| eos_token_id=2,
|
| is_encoder_decoder=True,
|
| decoder_start_token_id=2,
|
| forced_eos_token_id=2,
|
|
|
| encoder_vocab_size=51289,
|
| use_encoder_tokenizer=False,
|
| encoder_pad_token_id=1,
|
| encoder_bos_token_id=0,
|
| encoder_eos_token_id=2,
|
| gemma_config=None,
|
| **kwargs,
|
| ):
|
| self.vocab_size = vocab_size
|
| self.max_position_embeddings = max_position_embeddings
|
| self.d_model = d_model
|
| self.encoder_ffn_dim = encoder_ffn_dim
|
| self.encoder_layers = encoder_layers
|
| self.encoder_attention_heads = encoder_attention_heads
|
| self.decoder_ffn_dim = decoder_ffn_dim
|
| self.decoder_layers = decoder_layers
|
| self.decoder_attention_heads = decoder_attention_heads
|
| self.dropout = dropout
|
| self.attention_dropout = attention_dropout
|
| self.activation_dropout = activation_dropout
|
| self.activation_function = activation_function
|
| self.init_std = init_std
|
| self.encoder_layerdrop = encoder_layerdrop
|
| self.decoder_layerdrop = decoder_layerdrop
|
| self.classifier_dropout = classifier_dropout
|
| self.use_cache = use_cache
|
| self.num_hidden_layers = encoder_layers
|
| self.scale_embedding = scale_embedding
|
| self.encoder_pad_token_id = encoder_pad_token_id
|
| self.encoder_bos_token_id = encoder_bos_token_id
|
| self.encoder_eos_token_id = encoder_eos_token_id
|
|
|
| self.encoder_vocab_size = encoder_vocab_size
|
| self.use_encoder_tokenizer = use_encoder_tokenizer
|
|
|
| self.gemma_config = gemma_config
|
| if gemma_config is not None:
|
| self.gemma_config = Gemma2Config(**gemma_config)
|
|
|
| super().__init__(
|
| num_labels=num_labels,
|
| pad_token_id=pad_token_id,
|
| bos_token_id=bos_token_id,
|
| eos_token_id=eos_token_id,
|
| is_encoder_decoder=is_encoder_decoder,
|
| decoder_start_token_id=decoder_start_token_id,
|
| forced_eos_token_id=forced_eos_token_id,
|
| **kwargs,
|
| )
|
|
|
|
|
| if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
|
| self.forced_bos_token_id = self.bos_token_id
|
| warnings.warn(
|
| f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
|
| "The config can simply be saved and uploaded again to be fixed."
|
| )
|
|
|
|
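| # Illustrative sketch: `gemma_config` is passed as a plain dict and is re-hydrated into a
| # `Gemma2Config` inside `FlorenceGemma2LanguageConfig.__init__`, e.g.:
| #
| #     text_config = FlorenceGemma2LanguageConfig(
| #         gemma_config={"num_hidden_layers": 26, "hidden_size": 2304},
| #     )
| #     assert isinstance(text_config.gemma_config, Gemma2Config)
|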
|
| class FlorenceGemma2Config(PretrainedConfig):
|
| r"""
|
| This is the configuration class to store the configuration of a [`Florence2ForConditionalGeneration`]. It is used to instantiate a
|
| Florence-2 model according to the specified arguments, defining the model architecture.
|
|
|
| Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| documentation from [`PretrainedConfig`] for more information.
|
|
|
| Args:
|
| vision_config (`Florence2VisionConfig`, *optional*):
|
| Custom vision config or dict
|
| text_config (`Union[AutoConfig, dict]`, *optional*):
|
| The config object of the text backbone.
|
| ignore_index (`int`, *optional*, defaults to -100):
|
| The ignore index for the loss function.
|
| vocab_size (`int`, *optional*, defaults to 51289):
|
| Vocabulary size of the Florence2 model. Defines the number of different tokens that can be represented by the
|
| `inputs_ids` passed when calling [`~Florence2ForConditionalGeneration`]
|
| projection_dim (`int`, *optional*, defaults to 1024):
|
| Dimension of the multimodal projection space.
|
|
|
| Example:
|
|
|
| ```python
|
| >>> from transformers import Florence2ForConditionalGeneration, Florence2Config, CLIPVisionConfig, BartConfig
|
|
|
| >>> # Initializing a clip-like vision config
|
| >>> vision_config = CLIPVisionConfig()
|
|
|
| >>> # Initializing a Bart config
|
| >>> text_config = BartConfig()
|
|
|
| >>> # Initializing a Florence-2 configuration
|
| >>> configuration = Florence2Config(vision_config, text_config)
|
|
|
| >>> # Initializing a model from the florence-2 configuration
|
| >>> model = Florence2ForConditionalGeneration(configuration)
|
|
|
| >>> # Accessing the model configuration
|
| >>> configuration = model.config
|
| ```"""
|
|
|
| model_type = "florencegemma2"
|
| is_composition = False
|
|
|
| def __init__(
|
| self,
|
| vision_config=None,
|
| text_config=None,
|
| ignore_index=-100,
|
| vocab_size=51289,
|
| projection_dim=1024,
|
| **kwargs,
|
| ):
|
| self.ignore_index = ignore_index
|
| self.vocab_size = vocab_size
|
| self.projection_dim = projection_dim
|
| if vision_config is not None:
|
| vision_config = Florence2VisionConfig(**vision_config)
|
| self.vision_config = vision_config
|
|
|
| self.text_config = text_config
|
| if text_config is not None:
|
| self.text_config = FlorenceGemma2LanguageConfig(**text_config)
|
|
|
| super().__init__(**kwargs)
|
|
|
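|
|
| if __name__ == "__main__":
|     # Minimal usage sketch / smoke test (added for illustration, not part of the original module;
|     # it assumes `Florence2VisionConfig` and the nested configs can be built purely from their
|     # defaults). Nested configs are passed as dicts and re-hydrated into config objects by
|     # `FlorenceGemma2Config.__init__`.
|     config = FlorenceGemma2Config(
|         vision_config={},                  # -> Florence2VisionConfig()
|         text_config={"gemma_config": {}},  # -> FlorenceGemma2LanguageConfig(gemma_config={})
|     )
|     print(config.text_config.gemma_config.num_hidden_layers)  # 26 by default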