from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)


class Fgclip2TextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Fgclip2TextModel`]. It is used to instantiate a
    Fgclip2 text encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a configuration similar to that of the text encoder of the Fgclip2
    [qihoo360/fg-clip2-base](https://huggingface.co/qihoo360/fg-clip2-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the Fgclip2 text model. Defines the number of different tokens that can be represented
            by the `input_ids` passed when calling [`Fgclip2Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 64):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        pad_token_id (`int`, *optional*, defaults to 1):
            The id of the padding token in the vocabulary.
        bos_token_id (`int`, *optional*, defaults to 49406):
            The id of the beginning-of-sequence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 49407):
            The id of the end-of-sequence token in the vocabulary.
        projection_size (`int`, *optional*, defaults to `hidden_size`):
            The size of the projection head.
        keep_len (`int`, *optional*, defaults to 20):
            The number of leading tokens retained when processing long texts, so that short texts are still handled
            well. For details, please refer to the FG-CLIP paper: https://arxiv.org/abs/2505.05071 (see also the
            long-text example below).
        longtext_len (`int`, *optional*, defaults to 196):
            The maximum number of tokens that can be processed for long-text inputs.

    Example:

    ```python
    >>> from transformers import Fgclip2TextConfig, Fgclip2TextModel

    >>> # Initializing a Fgclip2TextConfig with qihoo360/fg-clip2-base style configuration
    >>> configuration = Fgclip2TextConfig()

    >>> # Initializing a Fgclip2TextModel (with random weights) from the qihoo360/fg-clip2-base style configuration
    >>> model = Fgclip2TextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
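
    >>> # Hedged sketch: setting the long-text fields described above explicitly.
    >>> # These values are illustrative, not the settings of a released checkpoint.
    >>> long_text_configuration = Fgclip2TextConfig(keep_len=20, longtext_len=196)
    >>> long_text_configuration.longtext_len
    196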
    ```"""

    model_type = "fgclip2_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        max_position_embeddings=64,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        pad_token_id=1,
        bos_token_id=49406,
        eos_token_id=49407,
        projection_size=None,
        keep_len=20,
        longtext_len=196,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        self.projection_size = projection_size if projection_size is not None else hidden_size
        self.keep_len = keep_len
        self.longtext_len = longtext_len


class Fgclip2VisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Fgclip2VisionModel`]. It is used to instantiate
    a Fgclip2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a configuration similar to that of the vision encoder of the Fgclip2
    [qihoo360/fg-clip2-base](https://huggingface.co/qihoo360/fg-clip2-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        num_patches (`int`, *optional*, defaults to 256):
            The number of patches of size (`patch_size`, `patch_size`) in the image. The image is resized to fill at
            most this number of patches while preserving the aspect ratio. If the resulting number of patches is
            lower, the image is padded along the patch dimension.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    Example:

    ```python
    >>> from transformers import Fgclip2VisionConfig, Fgclip2VisionModel

    >>> # Initializing a Fgclip2VisionConfig with qihoo360/fg-clip2-base style configuration
    >>> configuration = Fgclip2VisionConfig()

    >>> # Initializing a Fgclip2VisionModel (with random weights) from the qihoo360/fg-clip2-base style configuration
    >>> model = Fgclip2VisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
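
    >>> # Illustrative sketch only: a variant that processes more patches per image
    >>> # (num_patches=576 is an assumed value, not an official checkpoint setting)
    >>> hi_res_configuration = Fgclip2VisionConfig(num_patches=576)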
    ```"""

    model_type = "fgclip2_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        num_patches=256,
        patch_size=16,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.num_patches = num_patches


class Fgclip2Config(PretrainedConfig):
    r"""
    [`Fgclip2Config`] is the configuration class to store the configuration of a [`Fgclip2Model`]. It is used to
    instantiate a Fgclip2 model according to the specified arguments, defining the text model and vision model
    configs. Instantiating a configuration with the defaults will yield a configuration similar to that of the
    Fgclip2 [qihoo360/fg-clip2-base](https://huggingface.co/qihoo360/fg-clip2-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Fgclip2TextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Fgclip2VisionConfig`].
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import Fgclip2Config, Fgclip2Model

    >>> # Initializing a Fgclip2Config with qihoo360/fg-clip2-base style configuration
    >>> configuration = Fgclip2Config()

    >>> # Initializing a Fgclip2Model (with random weights) from the qihoo360/fg-clip2-base style configuration
    >>> model = Fgclip2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a Fgclip2Config from a Fgclip2TextConfig and a Fgclip2VisionConfig
    >>> from transformers import Fgclip2TextConfig, Fgclip2VisionConfig

    >>> # Initializing a Fgclip2Text and Fgclip2Vision configuration
    >>> config_text = Fgclip2TextConfig()
    >>> config_vision = Fgclip2VisionConfig()

    >>> config = Fgclip2Config(text_config=config_text.to_dict(), vision_config=config_vision.to_dict())
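
    >>> # The sub-configurations are exposed as attributes of the composed config
    >>> config.text_config.vocab_size
    32000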
    ```"""

    model_type = "fgclip2"
    sub_configs = {"text_config": Fgclip2TextConfig, "vision_config": Fgclip2VisionConfig}

    def __init__(self, text_config=None, vision_config=None, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `Fgclip2TextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `Fgclip2VisionConfig` with default values.")

        self.text_config = Fgclip2TextConfig(**text_config)
        self.vision_config = Fgclip2VisionConfig(**vision_config)

        self.initializer_factor = 1.0


__all__ = ["Fgclip2Config", "Fgclip2TextConfig", "Fgclip2VisionConfig"]
|
|