from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation
from ..auto import CONFIG_MAPPING, AutoConfig


class AriaTextConfig(PretrainedConfig):
    r"""
    This class handles the configuration for the text component of the Aria model.
    Instantiating a configuration with the defaults will yield a configuration similar to that of the text backbone of
    the [rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) model.
    This class extends [`LlamaConfig`] with additional parameters specific to the Mixture of Experts (MoE)
    architecture.

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`LlamaModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 4096):
            The size of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention (GQA). If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
            Llama 2 up to 4096, CodeLlama up to 16384.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 2):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
            understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
            results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the input and output word embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope
            type and you expect the model to work on a longer `max_position_embeddings`, we recommend you update this
            value accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If
                    unspecified, it defaults to the value recommended by the implementation, using the `factor` field
                    to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp
                    function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp
                    function. If unspecified, it defaults to 1.
                `short_factor` (`list[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2.
                `long_factor` (`list[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2.
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE.
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the up_proj, down_proj and gate_proj layers of the MLP.
        head_dim (`int`, *optional*):
            The attention head dimension. If None, it will default to `hidden_size // num_attention_heads`.
        moe_num_experts (`int`, *optional*, defaults to 8):
            The number of experts in the MoE layer.
        moe_topk (`int`, *optional*, defaults to 2):
            The number of top experts to route to for each token.
        moe_num_shared_experts (`int`, *optional*, defaults to 2):
            The number of shared experts.
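
    Example (a minimal usage sketch; `AriaTextModel` is the text-only backbone that `transformers` exports alongside
    this config):

    ```python
    >>> from transformers import AriaTextConfig, AriaTextModel

    >>> # Initializing a config with the default (Aria-like) text settings
    >>> configuration = AriaTextConfig()

    >>> # Initializing a model (with random weights) from that configuration
    >>> model = AriaTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```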
""" |
|
|
|
|
|
model_type = "aria_text" |
|
|
keys_to_ignore_at_inference = ["past_key_values"] |
|
|
|
|
|
base_model_tp_plan = { |
|
|
"layers.*.self_attn.q_proj": "colwise", |
|
|
"layers.*.self_attn.k_proj": "colwise", |
|
|
"layers.*.self_attn.v_proj": "colwise", |
|
|
"layers.*.self_attn.o_proj": "rowwise", |
|
|
"layers.*.mlp.gate_proj": "colwise", |
|
|
"layers.*.mlp.up_proj": "colwise", |
|
|
"layers.*.mlp.down_proj": "rowwise", |
|
|
} |
|
|
base_model_pp_plan = { |
|
|
"embed_tokens": (["input_ids"], ["inputs_embeds"]), |
|
|
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]), |
|
|
"norm": (["hidden_states"], ["hidden_states"]), |
|
|
} |
|
|
base_config_key = "text_config" |

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=4096,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=2,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        mlp_bias=False,
        head_dim=None,
        moe_num_experts=8,
        moe_topk=2,
        moe_num_shared_experts=2,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # For backward compatibility: checkpoints that omit `num_key_value_heads`
        # fall back to Multi Head Attention (one key/value head per query head).
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads

        # Validate the rotary position embedding parameters. For backward
        # compatibility, a legacy `type` field is mirrored into `rope_type`.
        if self.rope_scaling is not None and "type" in self.rope_scaling:
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
        rope_config_validation(self)

        self.moe_num_experts = moe_num_experts
        self.moe_topk = moe_topk
        self.moe_num_shared_experts = moe_num_shared_experts


class AriaConfig(PretrainedConfig):
    r"""
    This class handles the configuration for both the vision and text components of the Aria model, as well as
    additional parameters for image token handling and projector mapping.
    Instantiating a configuration with the defaults will yield a configuration similar to that of the
    [rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) model.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`AriaVisionConfig` or `dict`, *optional*):
            Configuration for the vision component. If a `dict` is passed, it is used to build an
            `Idefics3VisionConfig`; if unset, the default `Idefics3VisionConfig` is used.
        vision_feature_layer (`int`, *optional*, defaults to -1):
            The index of the layer from which to select the vision feature.
        text_config (`AriaTextConfig` or `dict`, *optional*):
            Configuration for the text component.
        projector_patch_to_query_dict (`dict`, *optional*, defaults to `{1225: 128, 4900: 256}`):
            Mapping of patch sizes to query dimensions.
        image_token_index (`int`, *optional*, defaults to 9):
            Index used to represent image tokens.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated normal initializer for initializing all weight matrices.

    Attributes:
        model_type (`str`):
            Type of the model, set to `"aria"`.
        image_token_index (`int`):
            Index used to represent image tokens.
        projector_patch_to_query_dict (`dict`):
            Mapping of patch sizes to query dimensions.
        vision_config (`AriaVisionConfig`):
            Configuration for the vision component.
        text_config (`AriaTextConfig`):
            Configuration for the text component.
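
    Example (a minimal usage sketch; `AriaForConditionalGeneration` is the multimodal model class that pairs with
    this config in `transformers`):

    ```python
    >>> from transformers import AriaConfig, AriaTextConfig, AriaForConditionalGeneration

    >>> # Initializing a config with the default (Aria-like) settings
    >>> configuration = AriaConfig()

    >>> # Composing a config from an explicit text sub-config
    >>> text_config = AriaTextConfig(moe_num_experts=8, moe_topk=2)
    >>> configuration = AriaConfig(text_config=text_config)

    >>> # Initializing a model (with random weights) from that configuration
    >>> model = AriaForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```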
    """

    model_type = "aria"
    attribute_map = {
        "image_token_id": "image_token_index",
    }
    sub_configs = {"text_config": AriaTextConfig, "vision_config": AutoConfig}

    def __init__(
        self,
        vision_config=None,
        vision_feature_layer: int = -1,
        text_config: Optional[AriaTextConfig] = None,
        projector_patch_to_query_dict: Optional[dict] = None,
        image_token_index: int = 9,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        self.image_token_index = image_token_index

        # Normalize the projector mapping: keys and values may arrive as strings
        # (e.g. when loaded from JSON), so cast both to int for consistency.
        if projector_patch_to_query_dict is None:
            projector_patch_to_query_dict = {
                1225: 128,
                4900: 256,
            }
        self.projector_patch_to_query_dict = {int(k): int(v) for k, v in projector_patch_to_query_dict.items()}
        self.max_value_projector_patch_to_query_dict = max(self.projector_patch_to_query_dict.values())
        self.vision_feature_layer = vision_feature_layer

        # The vision tower is an Idefics3 vision encoder; a dict is rebuilt into
        # that config, and `None` selects its defaults.
        if isinstance(vision_config, dict):
            vision_config["model_type"] = "idefics3_vision"
            vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        elif vision_config is None:
            vision_config = CONFIG_MAPPING["idefics3_vision"]()

        self.vision_config = vision_config
        self.initializer_range = initializer_range

        # Accept a plain dict for the text sub-config and build an AriaTextConfig
        # from it; `None` selects the default text configuration.
        if isinstance(text_config, dict):
            text_config = AriaTextConfig(**text_config)
        elif text_config is None:
            text_config = AriaTextConfig()

        self.text_config = text_config

        super().__init__(**kwargs)


__all__ = ["AriaConfig", "AriaTextConfig"]