| """DiffusionVL-Qwen2.5 (SigLIP + Qwen2.5) model configuration.""" |
|
|
| from typing import List, Optional, Union |
|
|
| from transformers.configuration_utils import PretrainedConfig |
|
|
|
|


class DiffusionVL_Qwen2_5_VisionConfig(PretrainedConfig):
    """
    Configuration for the SigLIP vision encoder used in DiffusionVL-Qwen2.5.

    Args:
        hidden_size: Dimension of the encoder layers (1152 for SigLIP-SO400M).
        intermediate_size: Dimension of the MLP layers.
        num_hidden_layers: Number of transformer layers.
        num_attention_heads: Number of attention heads.
        num_channels: Number of input image channels.
        image_size: Input image resolution.
        patch_size: Patch size for the patch embedding.
        hidden_act: Activation function.
        layer_norm_eps: Layer normalization epsilon.
        attention_dropout: Attention dropout probability.
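
    Example (illustrative usage; values follow the defaults below):
        >>> config = DiffusionVL_Qwen2_5_VisionConfig()
        >>> config.hidden_size
        1152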
| """ |
|
|
| model_type = "diffusionvl_qwen2_5_vision" |
| base_config_key = "vision_config" |
|
|

    def __init__(
        self,
        hidden_size: int = 1152,
        intermediate_size: int = 4304,
        num_hidden_layers: int = 26,
        num_attention_heads: int = 16,
        num_channels: int = 3,
        image_size: int = 384,
        patch_size: int = 14,
        hidden_act: str = "gelu_pytorch_tanh",
        layer_norm_eps: float = 1e-6,
        attention_dropout: float = 0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout


class DiffusionVL_Qwen2_5_Config(PretrainedConfig):
    """
    Configuration for the DiffusionVL-Qwen2.5 model.

    This model uses:
    - SigLIP as the vision encoder (external ViT)
    - PoolerProjector as the MM projector (Conv2d + MLP)
    - Qwen2.5 as the LLM backbone (standard RoPE, not M-RoPE)
    - BD3LM for diffusion-based generation

    Args:
        vocab_size: Vocabulary size.
        hidden_size: Dimension of the hidden representations.
        intermediate_size: Dimension of the MLP representations.
        num_hidden_layers: Number of hidden layers.
        num_attention_heads: Number of attention heads.
        num_key_value_heads: Number of key-value heads for GQA.
        hidden_act: Activation function.
        max_position_embeddings: Maximum sequence length.
        initializer_range: Standard deviation for weight initialization.
        rms_norm_eps: Epsilon for RMS normalization.
        use_cache: Whether to use the KV cache.
        tie_word_embeddings: Whether to tie input and output embeddings.
        attention_dropout: Attention dropout probability.
        vision_config: Vision encoder configuration, given as a
            `DiffusionVL_Qwen2_5_VisionConfig` or a plain dict; SigLIP defaults
            are used when omitted.
        mm_hidden_size: Hidden size of the vision encoder, used as the input
            dimension of the MM projector.
        enable_bd3lm: Whether to enable BD3LM diffusion.
        bd3lm_block_size: Block size for BD3LM block-wise diffusion.
        bd3lm_antithetic_sampling: Whether to use antithetic sampling when
            drawing diffusion timesteps.
        bd3lm_sampling_eps_min: Lower bound on the sampled diffusion noise level.
        bd3lm_sampling_eps_max: Upper bound on the sampled diffusion noise level.
        mask_token_id: Token ID of the mask token used by the diffusion process.
        rope_theta: Base period of the rotary position embeddings (RoPE).
        rope_scaling: Optional RoPE scaling configuration dict.
        sliding_window: Sliding-window size for attention.
        max_window_layers: Number of layers that use sliding-window attention
            (as in Qwen2).
        use_sliding_window: Whether to enable sliding-window attention.
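
    Example (illustrative usage; defaults mirror Qwen2.5-7B):
        >>> config = DiffusionVL_Qwen2_5_Config()
        >>> config.model_type
        'diffusionvl_qwen'
        >>> # vision_config may also be passed as a plain dict:
        >>> config = DiffusionVL_Qwen2_5_Config(vision_config={"image_size": 384})
        >>> config.vision_config.image_size
        384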
| """ |
|
|
| model_type = "diffusionvl_qwen" |
| sub_configs = {"vision_config": DiffusionVL_Qwen2_5_VisionConfig} |
| keys_to_ignore_at_inference = ["past_key_values"] |
|
|

    def __init__(
        self,
        # LLM backbone (defaults match Qwen2.5-7B)
        vocab_size: int = 152064,
        hidden_size: int = 3584,
        intermediate_size: int = 18944,
        num_hidden_layers: int = 28,
        num_attention_heads: int = 28,
        num_key_value_heads: int = 4,
        hidden_act: str = "silu",
        max_position_embeddings: int = 32768,
        initializer_range: float = 0.02,
        rms_norm_eps: float = 1e-6,
        use_cache: bool = True,
        tie_word_embeddings: bool = False,
        attention_dropout: float = 0.0,
        # Vision encoder
        vision_config: Optional[Union[DiffusionVL_Qwen2_5_VisionConfig, dict]] = None,
        # MM projector
        mm_hidden_size: int = 1152,
        # BD3LM diffusion
        enable_bd3lm: bool = True,
        bd3lm_block_size: int = 8,
        bd3lm_antithetic_sampling: bool = True,
        bd3lm_sampling_eps_min: float = 1e-3,
        bd3lm_sampling_eps_max: float = 1.0,
        mask_token_id: int = 151671,
        # RoPE
        rope_theta: float = 1000000.0,
        rope_scaling: Optional[dict] = None,
        # Sliding-window attention
        sliding_window: int = 32768,
        max_window_layers: int = 28,
        use_sliding_window: bool = False,
        **kwargs,
    ):
        # This config is flat (no nested text_config); drop any stray
        # `text_config` a caller may pass.
        kwargs.pop("text_config", None)

        # LLM backbone
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.sliding_window = sliding_window
        self.max_window_layers = max_window_layers
        self.use_sliding_window = use_sliding_window

        # Vision encoder: accept a config instance or a plain dict; anything
        # else (including None) falls back to the SigLIP defaults.
        if isinstance(vision_config, DiffusionVL_Qwen2_5_VisionConfig):
            self.vision_config = vision_config
        elif isinstance(vision_config, dict):
            self.vision_config = DiffusionVL_Qwen2_5_VisionConfig(**vision_config)
        else:
            self.vision_config = DiffusionVL_Qwen2_5_VisionConfig()

        # MM projector
        self.mm_hidden_size = mm_hidden_size

        # BD3LM diffusion settings
        self.enable_bd3lm = enable_bd3lm
        self.bd3lm_block_size = bd3lm_block_size
        self.bd3lm_antithetic_sampling = bd3lm_antithetic_sampling
        self.bd3lm_sampling_eps_min = bd3lm_sampling_eps_min
        self.bd3lm_sampling_eps_max = bd3lm_sampling_eps_max
        self.mask_token_id = mask_token_id

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


__all__ = ["DiffusionVL_Qwen2_5_Config", "DiffusionVL_Qwen2_5_VisionConfig"]
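

if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the public API):
    # round-trip the config through its dict form and check that the vision
    # sub-config is reconstructed from the serialized dict.
    cfg = DiffusionVL_Qwen2_5_Config()
    restored = DiffusionVL_Qwen2_5_Config.from_dict(cfg.to_dict())
    assert restored.vision_config.hidden_size == cfg.vision_config.hidden_size
    print(restored.model_type, restored.vision_config.hidden_size)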