| """ Husky model configuration""" |
|
|
| import copy |
| import os |
| from typing import Union |
|
|
| from transformers.configuration_utils import PretrainedConfig |
| from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES |
| from transformers.utils import logging |
|
|
| from transformers.models.auto import CONFIG_MAPPING |
|
|
| logger = logging.get_logger(__name__) |
|
|
| HUSKY_PRETRAINED_CONFIG_ARCHIVE_MAP = { |
| "wofmanaf/husky-7b": "https://huggingface.co/wofmanaf/husky-7b/resolve/main/config.json", |
| } |
|
|
| class HuskyVisionConfig(PretrainedConfig): |
| r""" |
| This is the configuration class to store the configuration of a [`HuskyVisionModel`]. It is used to |
| instantiate a Husky vision encoder according to the specified arguments, defining the model architecture. |
| Instantiating a configuration defaults will yield a similar configuration to that of the Husky architecture. |
| |
| Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the |
| documentation from [`PretrainedConfig`] for more information. |
| |
| Args: |
| hidden_size (`int`, *optional*, defaults to 1408): |
| Dimensionality of the encoder layers and the pooler layer. |
| intermediate_size (`int`, *optional*, defaults to 6144): |
| Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. |
| num_hidden_layers (`int`, *optional*, defaults to 39): |
| Number of hidden layers in the Transformer encoder. |
| num_attention_heads (`int`, *optional*, defaults to 16): |
| Number of attention heads for each attention layer in the Transformer encoder. |
| image_size (`int`, *optional*, defaults to 224): |
| The size (resolution) of each image. |
| patch_size (`int`, *optional*, defaults to 14): |
| The size (resolution) of each patch. |
| hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): |
| The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, |
| `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults |
| to 1e-5): The epsilon used by the layer normalization layers. |
| dropout (`float`, *optional*, defaults to 0.0): |
| The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. |
| attention_dropout (`float`, *optional*, defaults to 0.0): |
| The dropout ratio for the attention probabilities. |
| initializer_range (`float`, *optional*, defaults to 0.02): |
| The standard deviation of the truncated_normal_initializer for initializing all weight matrices. |
| initializer_factor (`float``, *optional*, defaults to 1): |
| A factor for initializing all weight matrices (should be kept to 1, used internally for initialization |
| testing). |
| qkv_bias (`bool`, *optional*, defaults to `True`): |
| Whether to add a bias to the queries and values in the self-attention layers. |
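
    Example (a minimal usage sketch; the import path `configuration_husky` is a hypothetical module name):

    ```python
    >>> from configuration_husky import HuskyVisionConfig

    >>> # Initializing a HuskyVisionConfig with the default values documented above
    >>> configuration = HuskyVisionConfig()
    >>> configuration.hidden_size
    1408
    ```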
| """ |
|
|
| model_type = "husky_vision_model" |
|
|
| def __init__( |
| self, |
| hidden_size=1408, |
| intermediate_size=6144, |
| projection_dim=512, |
| num_hidden_layers=39, |
| num_attention_heads=16, |
| num_channels=3, |
| image_size=224, |
| patch_size=14, |
| hidden_act="gelu", |
| layer_norm_eps=0.00001, |
| dropout=0.0, |
| attention_dropout=0.0, |
| initializer_range=1e-10, |
| initializer_factor=1.0, |
| qkv_bias=True, |
| _flash_attn_2_enabled=False, |
| **kwargs, |
| ): |
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.dropout = dropout
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
        self._flash_attn_2_enabled = _flash_attn_2_enabled

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # If loading from a full HuskyConfig, extract the nested vision config.
        if config_dict.get("model_type") == "husky":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class HuskyQFormerConfig(PretrainedConfig):
| r""" |
| This is the configuration class to store the configuration of a [`HuskyQFormerModel`]. It is used to |
| instantiate a Husky Querying Transformer (Q-Former) model according to the specified arguments, defining the |
| model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of |
| the Husky [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) |
| architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. |
| Read the documentation from [`PretrainedConfig`] for more information. |
| |
| Note that [`HuskyQFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention. |
| |
| Args: |
| vocab_size (`int`, *optional*, defaults to 30522): |
| Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by |
| the `inputs_ids` passed when calling the model. |
| hidden_size (`int`, *optional*, defaults to 768): |
| Dimensionality of the encoder layers and the pooler layer. |
| num_hidden_layers (`int`, *optional*, defaults to 12): |
| Number of hidden layers in the Transformer encoder. |
| num_attention_heads (`int`, *optional*, defaults to 12): |
| Number of attention heads for each attention layer in the Transformer encoder. |
| intermediate_size (`int`, *optional*, defaults to 3072): |
| Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. |
| hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): |
| The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, |
| `"relu"`, `"silu"` and `"gelu_new"` are supported. |
| hidden_dropout_prob (`float`, *optional*, defaults to 0.1): |
| The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. |
| attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): |
| The dropout ratio for the attention probabilities. |
| max_position_embeddings (`int`, *optional*, defaults to 512): |
| The maximum sequence length that this model might ever be used with. Typically set this to something large |
| just in case (e.g., 512 or 1024 or 2048). |
| initializer_range (`float`, *optional*, defaults to 0.02): |
| The standard deviation of the truncated_normal_initializer for initializing all weight matrices. |
| layer_norm_eps (`float`, *optional*, defaults to 1e-12): |
| The epsilon used by the layer normalization layers. |
| position_embedding_type (`str`, *optional*, defaults to `"absolute"`): |
| Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For |
| positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to |
| [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). |
| For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models |
| with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). |
| classifier_dropout (`float`, *optional*): |
| The dropout ratio for the classification head. |
| cross_attention_frequency (`int`, *optional*, defaults to 2): |
| The frequency of adding cross-attention to the Transformer layers. |
| encoder_hidden_size (`int`, *optional*, defaults to 1408): |
| The hidden size of the hidden states for cross-attention. |
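
    Example (a minimal usage sketch; the import path `configuration_husky` is a hypothetical module name):

    ```python
    >>> from configuration_husky import HuskyQFormerConfig

    >>> # Initializing a HuskyQFormerConfig with the default values documented above
    >>> configuration = HuskyQFormerConfig()
    >>> configuration.cross_attention_frequency
    2
    ```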
| """ |
| model_type = "husky_qformer" |
|
|
| def __init__( |
| self, |
| vocab_size=30522, |
| hidden_size=768, |
| num_hidden_layers=12, |
| num_attention_heads=12, |
| intermediate_size=3072, |
| hidden_act="gelu", |
| hidden_dropout_prob=0.1, |
| attention_probs_dropout_prob=0.1, |
| max_position_embeddings=512, |
| initializer_range=0.02, |
| layer_norm_eps=1e-12, |
| pad_token_id=0, |
| position_embedding_type="absolute", |
| classifier_dropout=None, |
| cross_attention_frequency=2, |
| encoder_hidden_size=1408, |
| _flash_attn_2_enabled=False, |
| **kwargs, |
| ): |
| super().__init__(pad_token_id=pad_token_id, **kwargs) |
|
|
| self.vocab_size = vocab_size |
| self.hidden_size = hidden_size |
| self.num_hidden_layers = num_hidden_layers |
| self.num_attention_heads = num_attention_heads |
| self.hidden_act = hidden_act |
| self.intermediate_size = intermediate_size |
| self.hidden_dropout_prob = hidden_dropout_prob |
| self.attention_probs_dropout_prob = attention_probs_dropout_prob |
| self.max_position_embeddings = max_position_embeddings |
| self.initializer_range = initializer_range |
| self.layer_norm_eps = layer_norm_eps |
| self.position_embedding_type = position_embedding_type |
| self.classifier_dropout = classifier_dropout |
| self.cross_attention_frequency = cross_attention_frequency |
| self.encoder_hidden_size = encoder_hidden_size |
| self._flash_attn_2_enabled = _flash_attn_2_enabled |
|
|
| @classmethod |
| def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": |
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # If loading from a full HuskyConfig, extract the nested Q-Former config.
        if config_dict.get("model_type") == "husky":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class HuskyConfig(PretrainedConfig):
| r""" |
| [`HuskyConfig`] is the configuration class to store the configuration of a |
| [`HuskyForConditionalGeneration`]. It is used to instantiate a Husky model according to the specified |
| arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with |
| the defaults will yield a similar configuration to that of the Husky |
| [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture. |
| |
| Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the |
| documentation from [`PretrainedConfig`] for more information. |
| |
| Args: |
| vision_config (`dict`, *optional*): |
| Dictionary of configuration options used to initialize [`HuskyVisionConfig`]. |
| qformer_config (`dict`, *optional*): |
| Dictionary of configuration options used to initialize [`HuskyQFormerConfig`]. |
| text_config (`dict`, *optional*): |
| Dictionary of configuration options used to initialize any [`PretrainedConfig`]. |
| num_query_tokens (`int`, *optional*, defaults to 32): |
| The number of query tokens passed through the Transformer. |
| |
| kwargs (*optional*): |
| Dictionary of keyword arguments. |
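
    Example (a minimal usage sketch; the import path `configuration_husky` is a hypothetical module name):

    ```python
    >>> from transformers import OPTConfig
    >>> from configuration_husky import HuskyConfig, HuskyQFormerConfig, HuskyVisionConfig

    >>> # Initializing a HuskyConfig with default values; the text config falls back to an `OPTConfig`
    >>> configuration = HuskyConfig()

    >>> # Building the same composite config from its three sub-configs
    >>> configuration = HuskyConfig.from_vision_qformer_text_configs(
    ...     vision_config=HuskyVisionConfig(),
    ...     qformer_config=HuskyQFormerConfig(),
    ...     text_config=OPTConfig(),
    ... )
    ```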
| """ |
|
|
| model_type = "husky" |
| is_composition = True |
|
|
| def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs): |
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the HuskyVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the HuskyQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = HuskyVisionConfig(**vision_config)
        self.qformer_config = HuskyQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: HuskyVisionConfig,
        qformer_config: HuskyQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        r"""
        Instantiate a [`HuskyConfig`] (or a derived class) from a Husky vision model, Q-Former and language model
        configurations.

        Returns:
            [`HuskyConfig`]: An instance of a configuration object
        """

        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
| """ |
| Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. |
| |
| Returns: |
| `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, |
| """ |
| output = copy.deepcopy(self.__dict__) |
| output["vision_config"] = self.vision_config.to_dict() |
| output["qformer_config"] = self.qformer_config.to_dict() |
| output["text_config"] = self.text_config.to_dict() |
| output["model_type"] = self.__class__.model_type |
| return output |
|
|
| if __name__ == '__main__': |
    # Smoke test: load the config of the checkpoint listed in HUSKY_PRETRAINED_CONFIG_ARCHIVE_MAP above.
    config = HuskyConfig.from_pretrained("wofmanaf/husky-7b")
    print(config)
|