chamber111 committed on
Commit 680aafe · verified · 1 Parent(s): 51fa3cb

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
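
Since tokenizer.json is now routed through Git LFS, a plain clone without LFS installed fetches only a small pointer stub in its place. A minimal sketch (not part of this commit) for spotting that situation:

```python
# Minimal sketch: check whether a checked-out file is a Git LFS pointer stub
# rather than the real payload.
from pathlib import Path

def is_lfs_pointer(path: str) -> bool:
    head = Path(path).read_bytes()[:120]
    # LFS pointer files are tiny and start with this spec line.
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")

print(is_lfs_pointer("tokenizer.json"))  # True if only the pointer was fetched
```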
README.md CHANGED
@@ -1,3 +1,67 @@
- ---
- license: mit
- ---
+ ---
+ library_name: transformers
+ model_name: Qwen3-VL-4B-Instruct-Lime-512-GRPO-Eval-3
+ tags:
+ - generated_from_trainer
+ - trl
+ - grpo
+ licence: license
+ ---
+
+ # Model Card for Qwen3-VL-4B-Instruct-Lime-512-GRPO-Eval-3
+
+ This model is a fine-tuned version of [None](https://huggingface.co/None).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="None", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+
+
+
+ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
+
+ ### Framework versions
+
+ - TRL: 0.26.2
+ - Transformers: 4.57.3
+ - Pytorch: 2.8.0+cu128
+ - Datasets: 4.4.2
+ - Tokenizers: 0.22.1
+
+ ## Citations
+
+ Cite GRPO as:
+
+ ```bibtex
+ @article{shao2024deepseekmath,
+     title = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+     author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+     year = 2024,
+     eprint = {arXiv:2402.03300},
+ }
+
+ ```
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title = {{TRL: Transformer Reinforcement Learning}},
+     author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
+     year = 2020,
+     journal = {GitHub repository},
+     publisher = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
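
The auto-generated Quick start above inherits the `model="None"` placeholder because TRL did not record a base model. A hedged sketch of loading this checkpoint itself instead; the repo id is an assumption inferred from the committer and `model_name`, and `trust_remote_code=True` is needed because config.json (below) maps the architecture to custom classes via `auto_map`:

```python
# Hedged sketch; the repo id is assumed, not confirmed by the commit.
from transformers import AutoModelForImageTextToText, AutoProcessor

repo_id = "chamber111/Qwen3-VL-4B-Instruct-Lime-512-GRPO-Eval-3"  # assumed
processor = AutoProcessor.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForImageTextToText.from_pretrained(
    repo_id,
    dtype="bfloat16",        # matches "dtype": "bfloat16" in config.json
    trust_remote_code=True,  # config.json routes to custom modeling_qwen3_vl.py classes
)
```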
added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "</think>": 151668,
+   "</tool_call>": 151658,
+   "</tool_response>": 151666,
+   "<think>": 151667,
+   "<tool_call>": 151657,
+   "<tool_response>": 151665,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
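
These entries sit at the top of the 151,936-token vocabulary. A quick sanity check, assuming the tokenizer is loaded from this repo (same hypothetical id as above):

```python
# Hedged sketch: the ids above should round-trip through the tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("chamber111/Qwen3-VL-4B-Instruct-Lime-512-GRPO-Eval-3")  # assumed id
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645     # also the eos_token_id in config.json
assert tok.convert_tokens_to_ids("<|image_pad|>") == 151655  # image_token_id
assert tok.convert_tokens_to_ids("<think>") == 151667
```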
chat_template.jinja ADDED
@@ -0,0 +1,120 @@
+ {%- if tools %}
+     {{- '<|im_start|>system\n' }}
+     {%- if messages[0].role == 'system' %}
+         {%- if messages[0].content is string %}
+             {{- messages[0].content }}
+         {%- else %}
+             {%- for content in messages[0].content %}
+                 {%- if 'text' in content %}
+                     {{- content.text }}
+                 {%- endif %}
+             {%- endfor %}
+         {%- endif %}
+         {{- '\n\n' }}
+     {%- endif %}
+     {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+     {%- for tool in tools %}
+         {{- "\n" }}
+         {{- tool | tojson }}
+     {%- endfor %}
+     {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+     {%- if messages[0].role == 'system' %}
+         {{- '<|im_start|>system\n' }}
+         {%- if messages[0].content is string %}
+             {{- messages[0].content }}
+         {%- else %}
+             {%- for content in messages[0].content %}
+                 {%- if 'text' in content %}
+                     {{- content.text }}
+                 {%- endif %}
+             {%- endfor %}
+         {%- endif %}
+         {{- '<|im_end|>\n' }}
+     {%- endif %}
+ {%- endif %}
+ {%- set image_count = namespace(value=0) %}
+ {%- set video_count = namespace(value=0) %}
+ {%- for message in messages %}
+     {%- if message.role == "user" %}
+         {{- '<|im_start|>' + message.role + '\n' }}
+         {%- if message.content is string %}
+             {{- message.content }}
+         {%- else %}
+             {%- for content in message.content %}
+                 {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}
+                     {%- set image_count.value = image_count.value + 1 %}
+                     {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}
+                     <|vision_start|><|image_pad|><|vision_end|>
+                 {%- elif content.type == 'video' or 'video' in content %}
+                     {%- set video_count.value = video_count.value + 1 %}
+                     {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}
+                     <|vision_start|><|video_pad|><|vision_end|>
+                 {%- elif 'text' in content %}
+                     {{- content.text }}
+                 {%- endif %}
+             {%- endfor %}
+         {%- endif %}
+         {{- '<|im_end|>\n' }}
+     {%- elif message.role == "assistant" %}
+         {{- '<|im_start|>' + message.role + '\n' }}
+         {%- if message.content is string %}
+             {{- message.content }}
+         {%- else %}
+             {%- for content_item in message.content %}
+                 {%- if 'text' in content_item %}
+                     {{- content_item.text }}
+                 {%- endif %}
+             {%- endfor %}
+         {%- endif %}
+         {%- if message.tool_calls %}
+             {%- for tool_call in message.tool_calls %}
+                 {%- if (loop.first and message.content) or (not loop.first) %}
+                     {{- '\n' }}
+                 {%- endif %}
+                 {%- if tool_call.function %}
+                     {%- set tool_call = tool_call.function %}
+                 {%- endif %}
+                 {{- '<tool_call>\n{"name": "' }}
+                 {{- tool_call.name }}
+                 {{- '", "arguments": ' }}
+                 {%- if tool_call.arguments is string %}
+                     {{- tool_call.arguments }}
+                 {%- else %}
+                     {{- tool_call.arguments | tojson }}
+                 {%- endif %}
+                 {{- '}\n</tool_call>' }}
+             {%- endfor %}
+         {%- endif %}
+         {{- '<|im_end|>\n' }}
+     {%- elif message.role == "tool" %}
+         {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+             {{- '<|im_start|>user' }}
+         {%- endif %}
+         {{- '\n<tool_response>\n' }}
+         {%- if message.content is string %}
+             {{- message.content }}
+         {%- else %}
+             {%- for content in message.content %}
+                 {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}
+                     {%- set image_count.value = image_count.value + 1 %}
+                     {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}
+                     <|vision_start|><|image_pad|><|vision_end|>
+                 {%- elif content.type == 'video' or 'video' in content %}
+                     {%- set video_count.value = video_count.value + 1 %}
+                     {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}
+                     <|vision_start|><|video_pad|><|vision_end|>
+                 {%- elif 'text' in content %}
+                     {{- content.text }}
+                 {%- endif %}
+             {%- endfor %}
+         {%- endif %}
+         {{- '\n</tool_response>' }}
+         {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+             {{- '<|im_end|>\n' }}
+         {%- endif %}
+     {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+     {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
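
The template wraps each turn in `<|im_start|>…<|im_end|>` and replaces every image entry with `<|vision_start|><|image_pad|><|vision_end|>`. A small sketch of how it renders, assuming `tok` is the tokenizer loaded earlier (it carries this chat_template.jinja):

```python
# Sketch: render the template for one user turn containing an image.
messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this picture."},
    ]},
]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# Expected shape of the rendered prompt:
# <|im_start|>user
# <|vision_start|><|image_pad|><|vision_end|>Describe this picture.<|im_end|>
# <|im_start|>assistant
```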
config.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "architectures": [
+     "LimeQwen3VLForConditionalGeneration"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_qwen3_vl.LimeQwen3VLConfig",
+     "AutoModel": "modeling_qwen3_vl.LimeQwen3VLModel",
+     "AutoModelForImageTextToText": "modeling_qwen3_vl.LimeQwen3VLForConditionalGeneration",
+     "AutoModelForVision2Seq": "modeling_qwen3_vl.LimeQwen3VLForConditionalGeneration",
+     "qwen3_vl_text": "configuration_qwen3_vl.LimeQwen3VLTextConfig"
+   },
+   "dtype": "bfloat16",
+   "eos_token_id": 151645,
+   "image_token_id": 151655,
+   "model_type": "qwen3_vl",
+   "pad_token_id": 151643,
+   "text_config": {
+     "attention_bias": false,
+     "attention_dropout": 0.0,
+     "bos_token_id": 151643,
+     "dtype": "bfloat16",
+     "eos_token_id": 151645,
+     "head_dim": 128,
+     "hidden_act": "silu",
+     "hidden_size": 2560,
+     "initializer_range": 0.02,
+     "intermediate_size": 9728,
+     "lime_hidden_size": 512,
+     "lime_layers": [
+       5,
+       11,
+       17
+     ],
+     "max_position_embeddings": 262144,
+     "model_type": "qwen3_vl_text",
+     "num_attention_heads": 32,
+     "num_hidden_layers": 36,
+     "num_key_value_heads": 8,
+     "rms_norm_eps": 1e-06,
+     "rope_scaling": {
+       "mrope_interleaved": true,
+       "mrope_section": [
+         24,
+         20,
+         20
+       ],
+       "rope_type": "default"
+     },
+     "rope_theta": 5000000,
+     "tie_word_embeddings": true,
+     "use_cache": true,
+     "vocab_size": 151936
+   },
+   "tie_word_embeddings": true,
+   "transformers_version": "4.57.3",
+   "video_token_id": 151656,
+   "vision_config": {
+     "deepstack_visual_indexes": [
+       5,
+       11,
+       17
+     ],
+     "depth": 24,
+     "dtype": "bfloat16",
+     "hidden_act": "gelu_pytorch_tanh",
+     "hidden_size": 1024,
+     "in_channels": 3,
+     "initializer_range": 0.02,
+     "intermediate_size": 4096,
+     "model_type": "qwen3_vl",
+     "num_heads": 16,
+     "num_position_embeddings": 2304,
+     "out_hidden_size": 2560,
+     "patch_size": 16,
+     "spatial_merge_size": 2,
+     "temporal_patch_size": 2
+   },
+   "vision_end_token_id": 151653,
+   "vision_start_token_id": 151652
+ }
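
A few quantities implied by this config, assuming the standard Qwen-VL scheme in which each `spatial_merge_size` × `spatial_merge_size` block of patches is merged into one image token (an assumption; the merge logic lives in modeling_qwen3_vl.py, which this commit page does not show):

```python
# Hedged arithmetic sketch of quantities implied by config.json above.
num_attention_heads, num_key_value_heads = 32, 8
print(num_attention_heads // num_key_value_heads)  # 4 query heads per KV head (GQA)

# Assumption: one image token per merged 2x2 patch block, as in other Qwen-VL models.
patch_size, spatial_merge_size = 16, 2
pixels_per_token_side = patch_size * spatial_merge_size  # 32 px
h = w = 512  # example input resolution
print((h // pixels_per_token_side) * (w // pixels_per_token_side))  # 256 <|image_pad|> slots
```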
configuration_qwen3_vl.py ADDED
@@ -0,0 +1,301 @@
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # This file was automatically generated from src/transformers/models/qwen3_vl/modular_qwen3_vl.py.
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
+ # the file from the modular. If any change should be done, please apply the change to the
+ # modular_qwen3_vl.py file directly. One of our CI enforces this.
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # coding=utf-8
+ # Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.modeling_rope_utils import rope_config_validation
+
+
+ class Qwen3VLVisionConfig(PretrainedConfig):
+     model_type = "qwen3_vl"
+     base_config_key = "vision_config"
+
+     def __init__(
+         self,
+         depth=27,
+         hidden_size=1152,
+         hidden_act="gelu_pytorch_tanh",
+         intermediate_size=4304,
+         num_heads=16,
+         in_channels=3,
+         patch_size=16,
+         spatial_merge_size=2,
+         temporal_patch_size=2,
+         out_hidden_size=3584,
+         num_position_embeddings=2304,
+         deepstack_visual_indexes=[8, 16, 24],
+         initializer_range=0.02,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.depth = depth
+         self.hidden_size = hidden_size
+         self.hidden_act = hidden_act
+         self.intermediate_size = intermediate_size
+         self.num_heads = num_heads
+         self.in_channels = in_channels
+         self.patch_size = patch_size
+         self.spatial_merge_size = spatial_merge_size
+         self.temporal_patch_size = temporal_patch_size
+         self.out_hidden_size = out_hidden_size
+         self.num_position_embeddings = num_position_embeddings
+         self.initializer_range = initializer_range
+         self.deepstack_visual_indexes = deepstack_visual_indexes
+
+
+ class LimeQwen3VLTextConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Qwen3VLTextModel`]. It is used to instantiate a
+     Qwen3-VL model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of
+     Qwen3-VL-4B-Instruct [Qwen/Qwen3-VL-4B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-4B-Instruct).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 151936):
+             Vocabulary size of the Qwen3VL model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`Qwen3VLModel`]
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 22016):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*, defaults to 32):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
+         head_dim (`int`, *optional*, defaults to 128):
+             The dimension of the head. If not specified, will default to `hidden_size // num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 128000):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether the model's input and output word embeddings should be tied.
+         rope_theta (`float`, *optional*, defaults to 5000000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
+             and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
+             accordingly.
+             Expected contents:
+                 `rope_type` (`str`):
+                     The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
+                     'llama3'], with 'default' being the original RoPE implementation.
+                 `factor` (`float`, *optional*):
+                     Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
+                     most scaling types, a `factor` of x will enable the model to handle sequences of length x *
+                     original maximum pre-trained length.
+                 `original_max_position_embeddings` (`int`, *optional*):
+                     Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
+                     pretraining.
+                 `attention_factor` (`float`, *optional*):
+                     Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
+                     computation. If unspecified, it defaults to value recommended by the implementation, using the
+                     `factor` field to infer the suggested value.
+                 `beta_fast` (`float`, *optional*):
+                     Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 32.
+                 `beta_slow` (`float`, *optional*):
+                     Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 1.
+                 `short_factor` (`list[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to short contexts (<
+                     `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+                     size divided by the number of attention heads divided by 2
+                 `long_factor` (`list[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to long contexts (<
+                     `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+                     size divided by the number of attention heads divided by 2
+                 `low_freq_factor` (`float`, *optional*):
+                     Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
+                 `high_freq_factor` (`float`, *optional*):
+                     Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         # --- 🚨 New parameter description ---
+         lime_layers (`list[int]`, *optional*, defaults to `None`):
+             The list of layer indices where the Lime (Latent Visual Memory Intervention) block should be added.
+             e.g. [4, 8, 12, 16, 20, 24, 28]
+         # ---------------------
+
+     ```python
+     >>> from transformers import Qwen3VLTextModel, Qwen3VLTextConfig
+
+     >>> # Initializing a Qwen3VL style configuration
+     >>> configuration = Qwen3VLTextConfig()
+
+     >>> # Initializing a model from the Qwen3-VL-7B style configuration
+     >>> model = Qwen3VLTextModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "qwen3_vl_text"
+     base_config_key = "text_config"
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=4096,
+         lime_hidden_size=512,
+         intermediate_size=22016,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=32,
+         head_dim=128,
+         hidden_act="silu",
+         max_position_embeddings=128000,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=5000000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         # --- 🚨 LIME CONFIG ARGUMENTS 🚨 ---
+         lime_layers=None,
+         # -----------------------------------
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.head_dim = head_dim
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+
+         # --- 🚨 LIME STATE 🚨 ---
+         self.lime_hidden_size = lime_hidden_size
+         self.lime_layers = lime_layers
+         # ------------------------
+
+         rope_config_validation(self, ignore_keys={"mrope_section", "mrope_interleaved"})
+
+         super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
+
+
+ class LimeQwen3VLConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Qwen3VLModel`]. It is used to instantiate a
+     Qwen3-VL model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of
+     Qwen3-VL-4B-Instruct [Qwen/Qwen3-VL-4B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-4B-Instruct).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3VLTextConfig`):
+             The config object or dictionary of the text backbone.
+         vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Qwen3VLVisionConfig`):
+             The config object or dictionary of the vision backbone.
+         image_token_id (`int`, *optional*, defaults to 151655):
+             The image token index to encode the image prompt.
+         video_token_id (`int`, *optional*, defaults to 151656):
+             The video token index to encode the video prompt.
+         vision_start_token_id (`int`, *optional*, defaults to 151652):
+             The start token index to encode the image prompt.
+         vision_end_token_id (`int`, *optional*, defaults to 151653):
+             The end token index to encode the image prompt.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie the word embeddings.
+
+     ```python
+     >>> from transformers import Qwen3VLForConditionalGeneration, Qwen3VLConfig
+
+     >>> # Initializing a Qwen3-VL style configuration
+     >>> configuration = Qwen3VLConfig()
+
+     >>> # Initializing a model from the Qwen3-VL-4B style configuration
+     >>> model = Qwen3VLForConditionalGeneration(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "qwen3_vl"
+     sub_configs = {"vision_config": Qwen3VLVisionConfig, "text_config": LimeQwen3VLTextConfig}
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         text_config=None,
+         vision_config=None,
+         image_token_id=151655,
+         video_token_id=151656,
+         vision_start_token_id=151652,
+         vision_end_token_id=151653,
+         tie_word_embeddings=False,
+         **kwargs,
+     ):
+         if isinstance(vision_config, dict):
+             self.vision_config = self.sub_configs["vision_config"](**vision_config)
+         elif vision_config is None:
+             self.vision_config = self.sub_configs["vision_config"]()
+
+         if isinstance(text_config, dict):
+             self.text_config = self.sub_configs["text_config"](**text_config)
+         elif text_config is None:
+             self.text_config = self.sub_configs["text_config"]()
+
+         self.image_token_id = image_token_id
+         self.video_token_id = video_token_id
+         self.vision_start_token_id = vision_start_token_id
+         self.vision_end_token_id = vision_end_token_id
+         super().__init__(**kwargs, tie_word_embeddings=tie_word_embeddings)
+
+
+ __all__ = ["LimeQwen3VLConfig", "LimeQwen3VLTextConfig"]
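
A minimal sketch instantiating the config class above with the values this repo's config.json actually uses, to see where the Lime settings land (assumes configuration_qwen3_vl.py is importable from the working directory):

```python
# Minimal sketch: build the text config with this repo's config.json values.
from configuration_qwen3_vl import LimeQwen3VLTextConfig

cfg = LimeQwen3VLTextConfig(
    hidden_size=2560,
    intermediate_size=9728,
    num_hidden_layers=36,
    num_key_value_heads=8,
    lime_hidden_size=512,     # the "512" in the model name
    lime_layers=[5, 11, 17],  # layers that receive a lime_block (see the weight map below)
    tie_word_embeddings=True,
)
print(cfg.lime_layers, cfg.lime_hidden_size)
```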
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "temperature": 0.7,
+   "top_k": 20,
+   "top_p": 0.8,
+   "transformers_version": "4.57.3"
+ }
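
These defaults are picked up automatically by `generate()`; spelled out explicitly they amount to the call below (a sketch; `model` and tokenized `inputs` are assumed from the loading example earlier). Note the file lists 151645 twice in `eos_token_id`, which is redundant but harmless:

```python
# Hedged sketch: the explicit equivalent of the sampling defaults above.
out = model.generate(
    **inputs,
    do_sample=True,
    temperature=0.7,
    top_k=20,
    top_p=0.8,
    pad_token_id=151643,
    eos_token_id=[151645, 151643],  # the duplicate 151645 in the file is dropped here
)
```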
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6c55d60fc17464eacbdf744e0bb20e2c5053c1cce101ef4cd79a3c72eb1148f
+ size 4970999828
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cdcdb23615047af49a370298cdb19d61b3b249135132ee50e29b7273115869c7
+ size 3950183626
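
Each pointer records the `oid` and `size` of the real shard, so a download can be verified end to end. A sketch using the second shard's hash from above:

```python
# Sketch: verify a downloaded shard against the LFS pointer metadata above.
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "cdcdb23615047af49a370298cdb19d61b3b249135132ee50e29b7273115869c7"
assert sha256_of("model-00002-of-00002.safetensors") == expected
```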
model.safetensors.index.json ADDED
@@ -0,0 +1,766 @@
+ {
+   "metadata": {
+     "total_parameters": 966915,
+     "total_size": 8921089542
+   },
+   "weight_map": {
+     "model.language_model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.0.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.0.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.1.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.1.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.10.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.10.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.cross_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.cross_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.cross_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.cross_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.cross_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.cross_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.gate_alpha": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.input_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.output_expander.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.post_attn_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.text_reducer.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.lime_block.vision_reducer.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.12.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.12.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.13.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.13.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.14.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.14.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.15.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.15.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.16.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.16.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.16.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.16.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.16.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.16.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.17.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.cross_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.cross_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.cross_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.cross_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.cross_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.cross_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.gate_alpha": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.input_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.output_expander.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.post_attn_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.text_reducer.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.lime_block.vision_reducer.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.17.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.18.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.18.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.18.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.18.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.18.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.18.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.18.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.18.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.18.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.18.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.18.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.19.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.19.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.19.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.19.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.19.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.19.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.19.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.19.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.19.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.19.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.19.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.2.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.2.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.20.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.20.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.20.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.20.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.20.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.20.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.20.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.20.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.21.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.21.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.22.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.22.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.23.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.23.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.24.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.24.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.25.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.25.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.26.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.26.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.27.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.27.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.28.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.28.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.29.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.29.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.3.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.3.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.30.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.30.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.31.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.31.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors",
325
+ "model.language_model.layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
326
+ "model.language_model.layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
327
+ "model.language_model.layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
328
+ "model.language_model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
329
+ "model.language_model.layers.32.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
330
+ "model.language_model.layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
331
+ "model.language_model.layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
332
+ "model.language_model.layers.32.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
333
+ "model.language_model.layers.32.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
334
+ "model.language_model.layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
335
+ "model.language_model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors",
336
+ "model.language_model.layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
337
+ "model.language_model.layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
338
+ "model.language_model.layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
339
+ "model.language_model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
340
+ "model.language_model.layers.33.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
341
+ "model.language_model.layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
342
+ "model.language_model.layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
343
+ "model.language_model.layers.33.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
344
+ "model.language_model.layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
345
+ "model.language_model.layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
346
+ "model.language_model.layers.34.input_layernorm.weight": "model-00002-of-00002.safetensors",
347
+ "model.language_model.layers.34.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
348
+ "model.language_model.layers.34.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
349
+ "model.language_model.layers.34.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
350
+ "model.language_model.layers.34.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
351
+ "model.language_model.layers.34.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
352
+ "model.language_model.layers.34.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
353
+ "model.language_model.layers.34.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
354
+ "model.language_model.layers.34.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
355
+ "model.language_model.layers.34.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
356
+ "model.language_model.layers.34.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
357
+ "model.language_model.layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors",
358
+ "model.language_model.layers.35.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
359
+ "model.language_model.layers.35.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
360
+ "model.language_model.layers.35.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
361
+ "model.language_model.layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
362
+ "model.language_model.layers.35.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
363
+ "model.language_model.layers.35.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
364
+ "model.language_model.layers.35.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
365
+ "model.language_model.layers.35.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
366
+ "model.language_model.layers.35.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
367
+ "model.language_model.layers.35.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
368
+ "model.language_model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
369
+ "model.language_model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
370
+ "model.language_model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
371
+ "model.language_model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
372
+ "model.language_model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
373
+ "model.language_model.layers.4.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
374
+ "model.language_model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
375
+ "model.language_model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
376
+ "model.language_model.layers.4.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
377
+ "model.language_model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
378
+ "model.language_model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
379
+ "model.language_model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
380
+ "model.language_model.layers.5.lime_block.cross_attn.k_norm.weight": "model-00001-of-00002.safetensors",
381
+ "model.language_model.layers.5.lime_block.cross_attn.k_proj.weight": "model-00001-of-00002.safetensors",
382
+ "model.language_model.layers.5.lime_block.cross_attn.o_proj.weight": "model-00001-of-00002.safetensors",
383
+ "model.language_model.layers.5.lime_block.cross_attn.q_norm.weight": "model-00001-of-00002.safetensors",
384
+ "model.language_model.layers.5.lime_block.cross_attn.q_proj.weight": "model-00001-of-00002.safetensors",
385
+ "model.language_model.layers.5.lime_block.cross_attn.v_proj.weight": "model-00001-of-00002.safetensors",
386
+ "model.language_model.layers.5.lime_block.gate_alpha": "model-00001-of-00002.safetensors",
387
+ "model.language_model.layers.5.lime_block.input_norm.weight": "model-00001-of-00002.safetensors",
388
+ "model.language_model.layers.5.lime_block.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
389
+ "model.language_model.layers.5.lime_block.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
390
+ "model.language_model.layers.5.lime_block.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
391
+ "model.language_model.layers.5.lime_block.output_expander.weight": "model-00001-of-00002.safetensors",
392
+ "model.language_model.layers.5.lime_block.post_attn_norm.weight": "model-00001-of-00002.safetensors",
393
+ "model.language_model.layers.5.lime_block.text_reducer.weight": "model-00001-of-00002.safetensors",
394
+ "model.language_model.layers.5.lime_block.vision_reducer.weight": "model-00001-of-00002.safetensors",
395
+ "model.language_model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
396
+ "model.language_model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
397
+ "model.language_model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
398
+ "model.language_model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
399
+ "model.language_model.layers.5.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
400
+ "model.language_model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
401
+ "model.language_model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
402
+ "model.language_model.layers.5.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
403
+ "model.language_model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
404
+ "model.language_model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
405
+ "model.language_model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
406
+ "model.language_model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
407
+ "model.language_model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
408
+ "model.language_model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
409
+ "model.language_model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
410
+ "model.language_model.layers.6.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
411
+ "model.language_model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
412
+ "model.language_model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
413
+ "model.language_model.layers.6.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
414
+ "model.language_model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
415
+ "model.language_model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
416
+ "model.language_model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
417
+ "model.language_model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
418
+ "model.language_model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
419
+ "model.language_model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
420
+ "model.language_model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
421
+ "model.language_model.layers.7.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
422
+ "model.language_model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
423
+ "model.language_model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
424
+ "model.language_model.layers.7.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
425
+ "model.language_model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
426
+ "model.language_model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
427
+ "model.language_model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
428
+ "model.language_model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
429
+ "model.language_model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
430
+ "model.language_model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
431
+ "model.language_model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
432
+ "model.language_model.layers.8.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
433
+ "model.language_model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
434
+ "model.language_model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
435
+ "model.language_model.layers.8.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
436
+ "model.language_model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
437
+ "model.language_model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
438
+ "model.language_model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
439
+ "model.language_model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
440
+ "model.language_model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
441
+ "model.language_model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
442
+ "model.language_model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
443
+ "model.language_model.layers.9.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
444
+ "model.language_model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
445
+ "model.language_model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
446
+ "model.language_model.layers.9.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
447
+ "model.language_model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
448
+ "model.language_model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
449
+ "model.language_model.norm.weight": "model-00002-of-00002.safetensors",
450
+ "model.visual.blocks.0.attn.proj.bias": "model-00001-of-00002.safetensors",
451
+ "model.visual.blocks.0.attn.proj.weight": "model-00001-of-00002.safetensors",
452
+ "model.visual.blocks.0.attn.qkv.bias": "model-00001-of-00002.safetensors",
453
+ "model.visual.blocks.0.attn.qkv.weight": "model-00001-of-00002.safetensors",
454
+ "model.visual.blocks.0.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
455
+ "model.visual.blocks.0.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
456
+ "model.visual.blocks.0.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
457
+ "model.visual.blocks.0.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
458
+ "model.visual.blocks.0.norm1.bias": "model-00001-of-00002.safetensors",
459
+ "model.visual.blocks.0.norm1.weight": "model-00001-of-00002.safetensors",
460
+ "model.visual.blocks.0.norm2.bias": "model-00001-of-00002.safetensors",
461
+ "model.visual.blocks.0.norm2.weight": "model-00001-of-00002.safetensors",
462
+ "model.visual.blocks.1.attn.proj.bias": "model-00001-of-00002.safetensors",
463
+ "model.visual.blocks.1.attn.proj.weight": "model-00001-of-00002.safetensors",
464
+ "model.visual.blocks.1.attn.qkv.bias": "model-00001-of-00002.safetensors",
465
+ "model.visual.blocks.1.attn.qkv.weight": "model-00001-of-00002.safetensors",
466
+ "model.visual.blocks.1.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
467
+ "model.visual.blocks.1.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
468
+ "model.visual.blocks.1.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
469
+ "model.visual.blocks.1.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
470
+ "model.visual.blocks.1.norm1.bias": "model-00001-of-00002.safetensors",
471
+ "model.visual.blocks.1.norm1.weight": "model-00001-of-00002.safetensors",
472
+ "model.visual.blocks.1.norm2.bias": "model-00001-of-00002.safetensors",
473
+ "model.visual.blocks.1.norm2.weight": "model-00001-of-00002.safetensors",
474
+ "model.visual.blocks.10.attn.proj.bias": "model-00001-of-00002.safetensors",
475
+ "model.visual.blocks.10.attn.proj.weight": "model-00001-of-00002.safetensors",
476
+ "model.visual.blocks.10.attn.qkv.bias": "model-00001-of-00002.safetensors",
477
+ "model.visual.blocks.10.attn.qkv.weight": "model-00001-of-00002.safetensors",
478
+ "model.visual.blocks.10.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
479
+ "model.visual.blocks.10.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
480
+ "model.visual.blocks.10.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
481
+ "model.visual.blocks.10.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
482
+ "model.visual.blocks.10.norm1.bias": "model-00001-of-00002.safetensors",
483
+ "model.visual.blocks.10.norm1.weight": "model-00001-of-00002.safetensors",
484
+ "model.visual.blocks.10.norm2.bias": "model-00001-of-00002.safetensors",
485
+ "model.visual.blocks.10.norm2.weight": "model-00001-of-00002.safetensors",
486
+ "model.visual.blocks.11.attn.proj.bias": "model-00001-of-00002.safetensors",
487
+ "model.visual.blocks.11.attn.proj.weight": "model-00001-of-00002.safetensors",
488
+ "model.visual.blocks.11.attn.qkv.bias": "model-00001-of-00002.safetensors",
489
+ "model.visual.blocks.11.attn.qkv.weight": "model-00001-of-00002.safetensors",
490
+ "model.visual.blocks.11.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
491
+ "model.visual.blocks.11.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
492
+ "model.visual.blocks.11.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
493
+ "model.visual.blocks.11.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
494
+ "model.visual.blocks.11.norm1.bias": "model-00001-of-00002.safetensors",
495
+ "model.visual.blocks.11.norm1.weight": "model-00001-of-00002.safetensors",
496
+ "model.visual.blocks.11.norm2.bias": "model-00001-of-00002.safetensors",
497
+ "model.visual.blocks.11.norm2.weight": "model-00001-of-00002.safetensors",
498
+ "model.visual.blocks.12.attn.proj.bias": "model-00001-of-00002.safetensors",
499
+ "model.visual.blocks.12.attn.proj.weight": "model-00001-of-00002.safetensors",
500
+ "model.visual.blocks.12.attn.qkv.bias": "model-00001-of-00002.safetensors",
501
+ "model.visual.blocks.12.attn.qkv.weight": "model-00001-of-00002.safetensors",
502
+ "model.visual.blocks.12.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
503
+ "model.visual.blocks.12.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
504
+ "model.visual.blocks.12.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
505
+ "model.visual.blocks.12.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
506
+ "model.visual.blocks.12.norm1.bias": "model-00001-of-00002.safetensors",
507
+ "model.visual.blocks.12.norm1.weight": "model-00001-of-00002.safetensors",
508
+ "model.visual.blocks.12.norm2.bias": "model-00001-of-00002.safetensors",
509
+ "model.visual.blocks.12.norm2.weight": "model-00001-of-00002.safetensors",
510
+ "model.visual.blocks.13.attn.proj.bias": "model-00001-of-00002.safetensors",
511
+ "model.visual.blocks.13.attn.proj.weight": "model-00001-of-00002.safetensors",
512
+ "model.visual.blocks.13.attn.qkv.bias": "model-00001-of-00002.safetensors",
513
+ "model.visual.blocks.13.attn.qkv.weight": "model-00001-of-00002.safetensors",
514
+ "model.visual.blocks.13.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
515
+ "model.visual.blocks.13.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
516
+ "model.visual.blocks.13.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
517
+ "model.visual.blocks.13.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
518
+ "model.visual.blocks.13.norm1.bias": "model-00001-of-00002.safetensors",
519
+ "model.visual.blocks.13.norm1.weight": "model-00001-of-00002.safetensors",
520
+ "model.visual.blocks.13.norm2.bias": "model-00001-of-00002.safetensors",
521
+ "model.visual.blocks.13.norm2.weight": "model-00001-of-00002.safetensors",
522
+ "model.visual.blocks.14.attn.proj.bias": "model-00001-of-00002.safetensors",
523
+ "model.visual.blocks.14.attn.proj.weight": "model-00001-of-00002.safetensors",
524
+ "model.visual.blocks.14.attn.qkv.bias": "model-00001-of-00002.safetensors",
525
+ "model.visual.blocks.14.attn.qkv.weight": "model-00001-of-00002.safetensors",
526
+ "model.visual.blocks.14.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
527
+ "model.visual.blocks.14.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
528
+ "model.visual.blocks.14.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
529
+ "model.visual.blocks.14.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
530
+ "model.visual.blocks.14.norm1.bias": "model-00001-of-00002.safetensors",
531
+ "model.visual.blocks.14.norm1.weight": "model-00001-of-00002.safetensors",
532
+ "model.visual.blocks.14.norm2.bias": "model-00001-of-00002.safetensors",
533
+ "model.visual.blocks.14.norm2.weight": "model-00001-of-00002.safetensors",
534
+ "model.visual.blocks.15.attn.proj.bias": "model-00001-of-00002.safetensors",
535
+ "model.visual.blocks.15.attn.proj.weight": "model-00001-of-00002.safetensors",
536
+ "model.visual.blocks.15.attn.qkv.bias": "model-00001-of-00002.safetensors",
537
+ "model.visual.blocks.15.attn.qkv.weight": "model-00001-of-00002.safetensors",
538
+ "model.visual.blocks.15.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
539
+ "model.visual.blocks.15.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
540
+ "model.visual.blocks.15.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
541
+ "model.visual.blocks.15.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
542
+ "model.visual.blocks.15.norm1.bias": "model-00001-of-00002.safetensors",
543
+ "model.visual.blocks.15.norm1.weight": "model-00001-of-00002.safetensors",
544
+ "model.visual.blocks.15.norm2.bias": "model-00001-of-00002.safetensors",
545
+ "model.visual.blocks.15.norm2.weight": "model-00001-of-00002.safetensors",
546
+ "model.visual.blocks.16.attn.proj.bias": "model-00001-of-00002.safetensors",
547
+ "model.visual.blocks.16.attn.proj.weight": "model-00001-of-00002.safetensors",
548
+ "model.visual.blocks.16.attn.qkv.bias": "model-00001-of-00002.safetensors",
549
+ "model.visual.blocks.16.attn.qkv.weight": "model-00001-of-00002.safetensors",
550
+ "model.visual.blocks.16.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
551
+ "model.visual.blocks.16.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
552
+ "model.visual.blocks.16.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
553
+ "model.visual.blocks.16.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
554
+ "model.visual.blocks.16.norm1.bias": "model-00001-of-00002.safetensors",
555
+ "model.visual.blocks.16.norm1.weight": "model-00001-of-00002.safetensors",
556
+ "model.visual.blocks.16.norm2.bias": "model-00001-of-00002.safetensors",
557
+ "model.visual.blocks.16.norm2.weight": "model-00001-of-00002.safetensors",
558
+ "model.visual.blocks.17.attn.proj.bias": "model-00001-of-00002.safetensors",
559
+ "model.visual.blocks.17.attn.proj.weight": "model-00001-of-00002.safetensors",
560
+ "model.visual.blocks.17.attn.qkv.bias": "model-00001-of-00002.safetensors",
561
+ "model.visual.blocks.17.attn.qkv.weight": "model-00001-of-00002.safetensors",
562
+ "model.visual.blocks.17.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
563
+ "model.visual.blocks.17.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
564
+ "model.visual.blocks.17.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
565
+ "model.visual.blocks.17.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
566
+ "model.visual.blocks.17.norm1.bias": "model-00001-of-00002.safetensors",
567
+ "model.visual.blocks.17.norm1.weight": "model-00001-of-00002.safetensors",
568
+ "model.visual.blocks.17.norm2.bias": "model-00001-of-00002.safetensors",
569
+ "model.visual.blocks.17.norm2.weight": "model-00001-of-00002.safetensors",
570
+ "model.visual.blocks.18.attn.proj.bias": "model-00001-of-00002.safetensors",
571
+ "model.visual.blocks.18.attn.proj.weight": "model-00001-of-00002.safetensors",
572
+ "model.visual.blocks.18.attn.qkv.bias": "model-00001-of-00002.safetensors",
573
+ "model.visual.blocks.18.attn.qkv.weight": "model-00001-of-00002.safetensors",
574
+ "model.visual.blocks.18.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
575
+ "model.visual.blocks.18.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
576
+ "model.visual.blocks.18.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
577
+ "model.visual.blocks.18.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
578
+ "model.visual.blocks.18.norm1.bias": "model-00001-of-00002.safetensors",
579
+ "model.visual.blocks.18.norm1.weight": "model-00001-of-00002.safetensors",
580
+ "model.visual.blocks.18.norm2.bias": "model-00001-of-00002.safetensors",
581
+ "model.visual.blocks.18.norm2.weight": "model-00001-of-00002.safetensors",
582
+ "model.visual.blocks.19.attn.proj.bias": "model-00001-of-00002.safetensors",
583
+ "model.visual.blocks.19.attn.proj.weight": "model-00001-of-00002.safetensors",
584
+ "model.visual.blocks.19.attn.qkv.bias": "model-00001-of-00002.safetensors",
585
+ "model.visual.blocks.19.attn.qkv.weight": "model-00001-of-00002.safetensors",
586
+ "model.visual.blocks.19.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
587
+ "model.visual.blocks.19.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
588
+ "model.visual.blocks.19.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
589
+ "model.visual.blocks.19.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
590
+ "model.visual.blocks.19.norm1.bias": "model-00001-of-00002.safetensors",
591
+ "model.visual.blocks.19.norm1.weight": "model-00001-of-00002.safetensors",
592
+ "model.visual.blocks.19.norm2.bias": "model-00001-of-00002.safetensors",
593
+ "model.visual.blocks.19.norm2.weight": "model-00001-of-00002.safetensors",
594
+ "model.visual.blocks.2.attn.proj.bias": "model-00001-of-00002.safetensors",
595
+ "model.visual.blocks.2.attn.proj.weight": "model-00001-of-00002.safetensors",
596
+ "model.visual.blocks.2.attn.qkv.bias": "model-00001-of-00002.safetensors",
597
+ "model.visual.blocks.2.attn.qkv.weight": "model-00001-of-00002.safetensors",
598
+ "model.visual.blocks.2.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
599
+ "model.visual.blocks.2.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
600
+ "model.visual.blocks.2.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
601
+ "model.visual.blocks.2.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
602
+ "model.visual.blocks.2.norm1.bias": "model-00001-of-00002.safetensors",
603
+ "model.visual.blocks.2.norm1.weight": "model-00001-of-00002.safetensors",
604
+ "model.visual.blocks.2.norm2.bias": "model-00001-of-00002.safetensors",
605
+ "model.visual.blocks.2.norm2.weight": "model-00001-of-00002.safetensors",
606
+ "model.visual.blocks.20.attn.proj.bias": "model-00001-of-00002.safetensors",
607
+ "model.visual.blocks.20.attn.proj.weight": "model-00001-of-00002.safetensors",
608
+ "model.visual.blocks.20.attn.qkv.bias": "model-00001-of-00002.safetensors",
609
+ "model.visual.blocks.20.attn.qkv.weight": "model-00001-of-00002.safetensors",
610
+ "model.visual.blocks.20.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
611
+ "model.visual.blocks.20.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
612
+ "model.visual.blocks.20.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
613
+ "model.visual.blocks.20.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
614
+ "model.visual.blocks.20.norm1.bias": "model-00001-of-00002.safetensors",
615
+ "model.visual.blocks.20.norm1.weight": "model-00001-of-00002.safetensors",
616
+ "model.visual.blocks.20.norm2.bias": "model-00001-of-00002.safetensors",
617
+ "model.visual.blocks.20.norm2.weight": "model-00001-of-00002.safetensors",
618
+ "model.visual.blocks.21.attn.proj.bias": "model-00001-of-00002.safetensors",
619
+ "model.visual.blocks.21.attn.proj.weight": "model-00001-of-00002.safetensors",
620
+ "model.visual.blocks.21.attn.qkv.bias": "model-00001-of-00002.safetensors",
621
+ "model.visual.blocks.21.attn.qkv.weight": "model-00001-of-00002.safetensors",
622
+ "model.visual.blocks.21.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
623
+ "model.visual.blocks.21.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
624
+ "model.visual.blocks.21.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
625
+ "model.visual.blocks.21.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
626
+ "model.visual.blocks.21.norm1.bias": "model-00001-of-00002.safetensors",
627
+ "model.visual.blocks.21.norm1.weight": "model-00001-of-00002.safetensors",
628
+ "model.visual.blocks.21.norm2.bias": "model-00001-of-00002.safetensors",
629
+ "model.visual.blocks.21.norm2.weight": "model-00001-of-00002.safetensors",
630
+ "model.visual.blocks.22.attn.proj.bias": "model-00001-of-00002.safetensors",
631
+ "model.visual.blocks.22.attn.proj.weight": "model-00001-of-00002.safetensors",
632
+ "model.visual.blocks.22.attn.qkv.bias": "model-00001-of-00002.safetensors",
633
+ "model.visual.blocks.22.attn.qkv.weight": "model-00001-of-00002.safetensors",
634
+ "model.visual.blocks.22.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
635
+ "model.visual.blocks.22.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
636
+ "model.visual.blocks.22.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
637
+ "model.visual.blocks.22.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
638
+ "model.visual.blocks.22.norm1.bias": "model-00001-of-00002.safetensors",
639
+ "model.visual.blocks.22.norm1.weight": "model-00001-of-00002.safetensors",
640
+ "model.visual.blocks.22.norm2.bias": "model-00001-of-00002.safetensors",
641
+ "model.visual.blocks.22.norm2.weight": "model-00001-of-00002.safetensors",
642
+ "model.visual.blocks.23.attn.proj.bias": "model-00001-of-00002.safetensors",
643
+ "model.visual.blocks.23.attn.proj.weight": "model-00001-of-00002.safetensors",
644
+ "model.visual.blocks.23.attn.qkv.bias": "model-00001-of-00002.safetensors",
645
+ "model.visual.blocks.23.attn.qkv.weight": "model-00001-of-00002.safetensors",
646
+ "model.visual.blocks.23.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
647
+ "model.visual.blocks.23.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
648
+ "model.visual.blocks.23.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
649
+ "model.visual.blocks.23.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
650
+ "model.visual.blocks.23.norm1.bias": "model-00001-of-00002.safetensors",
651
+ "model.visual.blocks.23.norm1.weight": "model-00001-of-00002.safetensors",
652
+ "model.visual.blocks.23.norm2.bias": "model-00001-of-00002.safetensors",
653
+ "model.visual.blocks.23.norm2.weight": "model-00001-of-00002.safetensors",
654
+ "model.visual.blocks.3.attn.proj.bias": "model-00001-of-00002.safetensors",
655
+ "model.visual.blocks.3.attn.proj.weight": "model-00001-of-00002.safetensors",
656
+ "model.visual.blocks.3.attn.qkv.bias": "model-00001-of-00002.safetensors",
657
+ "model.visual.blocks.3.attn.qkv.weight": "model-00001-of-00002.safetensors",
658
+ "model.visual.blocks.3.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
659
+ "model.visual.blocks.3.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
660
+ "model.visual.blocks.3.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
661
+ "model.visual.blocks.3.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
662
+ "model.visual.blocks.3.norm1.bias": "model-00001-of-00002.safetensors",
663
+ "model.visual.blocks.3.norm1.weight": "model-00001-of-00002.safetensors",
664
+ "model.visual.blocks.3.norm2.bias": "model-00001-of-00002.safetensors",
665
+ "model.visual.blocks.3.norm2.weight": "model-00001-of-00002.safetensors",
666
+ "model.visual.blocks.4.attn.proj.bias": "model-00001-of-00002.safetensors",
667
+ "model.visual.blocks.4.attn.proj.weight": "model-00001-of-00002.safetensors",
668
+ "model.visual.blocks.4.attn.qkv.bias": "model-00001-of-00002.safetensors",
669
+ "model.visual.blocks.4.attn.qkv.weight": "model-00001-of-00002.safetensors",
670
+ "model.visual.blocks.4.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
671
+ "model.visual.blocks.4.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
672
+ "model.visual.blocks.4.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
673
+ "model.visual.blocks.4.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
674
+ "model.visual.blocks.4.norm1.bias": "model-00001-of-00002.safetensors",
675
+ "model.visual.blocks.4.norm1.weight": "model-00001-of-00002.safetensors",
676
+ "model.visual.blocks.4.norm2.bias": "model-00001-of-00002.safetensors",
677
+ "model.visual.blocks.4.norm2.weight": "model-00001-of-00002.safetensors",
678
+ "model.visual.blocks.5.attn.proj.bias": "model-00001-of-00002.safetensors",
679
+ "model.visual.blocks.5.attn.proj.weight": "model-00001-of-00002.safetensors",
680
+ "model.visual.blocks.5.attn.qkv.bias": "model-00001-of-00002.safetensors",
681
+ "model.visual.blocks.5.attn.qkv.weight": "model-00001-of-00002.safetensors",
682
+ "model.visual.blocks.5.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
683
+ "model.visual.blocks.5.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
684
+ "model.visual.blocks.5.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
685
+ "model.visual.blocks.5.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
686
+ "model.visual.blocks.5.norm1.bias": "model-00001-of-00002.safetensors",
687
+ "model.visual.blocks.5.norm1.weight": "model-00001-of-00002.safetensors",
688
+ "model.visual.blocks.5.norm2.bias": "model-00001-of-00002.safetensors",
689
+ "model.visual.blocks.5.norm2.weight": "model-00001-of-00002.safetensors",
690
+ "model.visual.blocks.6.attn.proj.bias": "model-00001-of-00002.safetensors",
691
+ "model.visual.blocks.6.attn.proj.weight": "model-00001-of-00002.safetensors",
692
+ "model.visual.blocks.6.attn.qkv.bias": "model-00001-of-00002.safetensors",
693
+ "model.visual.blocks.6.attn.qkv.weight": "model-00001-of-00002.safetensors",
694
+ "model.visual.blocks.6.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
695
+ "model.visual.blocks.6.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
696
+ "model.visual.blocks.6.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
697
+ "model.visual.blocks.6.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
698
+ "model.visual.blocks.6.norm1.bias": "model-00001-of-00002.safetensors",
699
+ "model.visual.blocks.6.norm1.weight": "model-00001-of-00002.safetensors",
700
+ "model.visual.blocks.6.norm2.bias": "model-00001-of-00002.safetensors",
701
+ "model.visual.blocks.6.norm2.weight": "model-00001-of-00002.safetensors",
702
+ "model.visual.blocks.7.attn.proj.bias": "model-00001-of-00002.safetensors",
703
+ "model.visual.blocks.7.attn.proj.weight": "model-00001-of-00002.safetensors",
704
+ "model.visual.blocks.7.attn.qkv.bias": "model-00001-of-00002.safetensors",
705
+ "model.visual.blocks.7.attn.qkv.weight": "model-00001-of-00002.safetensors",
706
+ "model.visual.blocks.7.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
707
+ "model.visual.blocks.7.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
708
+ "model.visual.blocks.7.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
709
+ "model.visual.blocks.7.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
710
+ "model.visual.blocks.7.norm1.bias": "model-00001-of-00002.safetensors",
711
+ "model.visual.blocks.7.norm1.weight": "model-00001-of-00002.safetensors",
712
+ "model.visual.blocks.7.norm2.bias": "model-00001-of-00002.safetensors",
713
+ "model.visual.blocks.7.norm2.weight": "model-00001-of-00002.safetensors",
714
+ "model.visual.blocks.8.attn.proj.bias": "model-00001-of-00002.safetensors",
715
+ "model.visual.blocks.8.attn.proj.weight": "model-00001-of-00002.safetensors",
716
+ "model.visual.blocks.8.attn.qkv.bias": "model-00001-of-00002.safetensors",
717
+ "model.visual.blocks.8.attn.qkv.weight": "model-00001-of-00002.safetensors",
718
+ "model.visual.blocks.8.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
719
+ "model.visual.blocks.8.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
720
+ "model.visual.blocks.8.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
721
+ "model.visual.blocks.8.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
722
+ "model.visual.blocks.8.norm1.bias": "model-00001-of-00002.safetensors",
723
+ "model.visual.blocks.8.norm1.weight": "model-00001-of-00002.safetensors",
724
+ "model.visual.blocks.8.norm2.bias": "model-00001-of-00002.safetensors",
725
+ "model.visual.blocks.8.norm2.weight": "model-00001-of-00002.safetensors",
726
+ "model.visual.blocks.9.attn.proj.bias": "model-00001-of-00002.safetensors",
727
+ "model.visual.blocks.9.attn.proj.weight": "model-00001-of-00002.safetensors",
728
+ "model.visual.blocks.9.attn.qkv.bias": "model-00001-of-00002.safetensors",
729
+ "model.visual.blocks.9.attn.qkv.weight": "model-00001-of-00002.safetensors",
730
+ "model.visual.blocks.9.mlp.linear_fc1.bias": "model-00001-of-00002.safetensors",
731
+ "model.visual.blocks.9.mlp.linear_fc1.weight": "model-00001-of-00002.safetensors",
732
+ "model.visual.blocks.9.mlp.linear_fc2.bias": "model-00001-of-00002.safetensors",
733
+ "model.visual.blocks.9.mlp.linear_fc2.weight": "model-00001-of-00002.safetensors",
734
+ "model.visual.blocks.9.norm1.bias": "model-00001-of-00002.safetensors",
735
+ "model.visual.blocks.9.norm1.weight": "model-00001-of-00002.safetensors",
736
+ "model.visual.blocks.9.norm2.bias": "model-00001-of-00002.safetensors",
737
+ "model.visual.blocks.9.norm2.weight": "model-00001-of-00002.safetensors",
738
+ "model.visual.deepstack_merger_list.0.linear_fc1.bias": "model-00001-of-00002.safetensors",
739
+ "model.visual.deepstack_merger_list.0.linear_fc1.weight": "model-00001-of-00002.safetensors",
740
+ "model.visual.deepstack_merger_list.0.linear_fc2.bias": "model-00001-of-00002.safetensors",
741
+ "model.visual.deepstack_merger_list.0.linear_fc2.weight": "model-00001-of-00002.safetensors",
742
+ "model.visual.deepstack_merger_list.0.norm.bias": "model-00001-of-00002.safetensors",
743
+ "model.visual.deepstack_merger_list.0.norm.weight": "model-00001-of-00002.safetensors",
744
+ "model.visual.deepstack_merger_list.1.linear_fc1.bias": "model-00001-of-00002.safetensors",
745
+ "model.visual.deepstack_merger_list.1.linear_fc1.weight": "model-00001-of-00002.safetensors",
746
+ "model.visual.deepstack_merger_list.1.linear_fc2.bias": "model-00001-of-00002.safetensors",
747
+ "model.visual.deepstack_merger_list.1.linear_fc2.weight": "model-00001-of-00002.safetensors",
748
+ "model.visual.deepstack_merger_list.1.norm.bias": "model-00001-of-00002.safetensors",
749
+ "model.visual.deepstack_merger_list.1.norm.weight": "model-00001-of-00002.safetensors",
750
+ "model.visual.deepstack_merger_list.2.linear_fc1.bias": "model-00001-of-00002.safetensors",
751
+ "model.visual.deepstack_merger_list.2.linear_fc1.weight": "model-00001-of-00002.safetensors",
752
+ "model.visual.deepstack_merger_list.2.linear_fc2.bias": "model-00001-of-00002.safetensors",
753
+ "model.visual.deepstack_merger_list.2.linear_fc2.weight": "model-00001-of-00002.safetensors",
754
+ "model.visual.deepstack_merger_list.2.norm.bias": "model-00001-of-00002.safetensors",
755
+ "model.visual.deepstack_merger_list.2.norm.weight": "model-00001-of-00002.safetensors",
756
+ "model.visual.merger.linear_fc1.bias": "model-00001-of-00002.safetensors",
757
+ "model.visual.merger.linear_fc1.weight": "model-00001-of-00002.safetensors",
758
+ "model.visual.merger.linear_fc2.bias": "model-00001-of-00002.safetensors",
759
+ "model.visual.merger.linear_fc2.weight": "model-00001-of-00002.safetensors",
760
+ "model.visual.merger.norm.bias": "model-00001-of-00002.safetensors",
761
+ "model.visual.merger.norm.weight": "model-00001-of-00002.safetensors",
762
+ "model.visual.patch_embed.proj.bias": "model-00001-of-00002.safetensors",
763
+ "model.visual.patch_embed.proj.weight": "model-00001-of-00002.safetensors",
764
+ "model.visual.pos_embed.weight": "model-00001-of-00002.safetensors"
765
+ }
766
+ }
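The map above is the tail of the sharded-checkpoint index (conventionally `model.safetensors.index.json`): each tensor name is keyed to the shard file that stores it, including the extra `lime_block.*` cross-attention weights grafted onto language-model layer 5. A minimal sketch of inspecting it directly, assuming the index and shard files sit in the current directory:

```python
import json
from collections import defaultdict

# Resolve which shard file holds a given tensor.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]  # tensor name -> shard filename
print(weight_map["model.language_model.layers.5.lime_block.gate_alpha"])
# -> "model-00001-of-00002.safetensors"

# Group tensors by shard to see how the checkpoint is split.
per_shard = defaultdict(list)
for name, shard in weight_map.items():
    per_shard[shard].append(name)
for shard, names in sorted(per_shard.items()):
    print(shard, len(names), "tensors")
```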
modeling_qwen3_vl.py ADDED
@@ -0,0 +1,1941 @@
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # This file was automatically generated from src/transformers/models/qwen3_vl/modular_qwen3_vl.py.
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
+ # the file from the modular. If any change should be done, please apply the change to the
+ # modular_qwen3_vl.py file directly. One of our CI enforces this.
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # coding=utf-8
+ # Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from dataclasses import dataclass
+ from typing import Any, Callable, Optional, Union
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, DynamicCache
+ from transformers.generation import GenerationMixin
+ from transformers.integrations import use_kernel_forward_from_hub
+ from transformers.masking_utils import create_causal_mask
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+ from transformers.modeling_layers import GradientCheckpointingLayer
+ from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+ from transformers.processing_utils import Unpack
+ from transformers.utils import TransformersKwargs, auto_docstring, is_torchdynamo_compiling
+ from transformers.utils.deprecation import deprecate_kwarg
+ from transformers.utils.generic import check_model_inputs
+ from .configuration_qwen3_vl import LimeQwen3VLConfig, LimeQwen3VLTextConfig, Qwen3VLVisionConfig
+
+ from torch.nn.utils.rnn import pad_sequence
+
+
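Because this `modeling_qwen3_vl.py` ships inside the checkpoint repo rather than in the `transformers` package, the usual way to instantiate these classes is remote-code loading. A hedged sketch (the repo id is a placeholder, and it assumes the repo's `config.json` carries an `auto_map` entry pointing at the `LimeQwen3VL*` classes defined here):

```python
from transformers import AutoConfig, AutoModel

# Placeholder repo id; assumes config.json maps the auto classes to this file.
repo = "your-username/your-lime-qwen3vl-checkpoint"
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
model = AutoModel.from_pretrained(repo, trust_remote_code=True)
print(type(model).__name__)
```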
+ class Qwen3VLVisionMLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         self.linear_fc1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=True)
+         self.linear_fc2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=True)
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, hidden_state):
+         return self.linear_fc2(self.act_fn(self.linear_fc1(hidden_state)))
+
+
+ class Qwen3VLVisionPatchEmbed(nn.Module):
+     def __init__(self, config) -> None:
+         super().__init__()
+         self.patch_size = config.patch_size
+         self.temporal_patch_size = config.temporal_patch_size
+         self.in_channels = config.in_channels
+         self.embed_dim = config.hidden_size
+
+         kernel_size = [self.temporal_patch_size, self.patch_size, self.patch_size]
+         self.proj = nn.Conv3d(self.in_channels, self.embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=True)
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         target_dtype = self.proj.weight.dtype
+         hidden_states = hidden_states.view(
+             -1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size
+         )
+         hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim)
+         return hidden_states
+
+
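The patch embedding receives patches already flattened by the processor and recovers the 5-D layout before the `Conv3d`; because the kernel size equals the stride, each patch maps to exactly one embedding vector. A shape sketch with illustrative sizes (the real values come from the vision config, so treat these numbers as assumptions):

```python
import torch
import torch.nn as nn

# Illustrative sizes: in_channels=3, temporal_patch_size=2, patch_size=16, hidden_size=1152.
C, T, P, D = 3, 2, 16, 1152
proj = nn.Conv3d(C, D, kernel_size=(T, P, P), stride=(T, P, P), bias=True)

num_patches = 64
flat = torch.randn(num_patches, C * T * P * P)  # one flattened 3x2x16x16 patch per row
as_patches = flat.view(-1, C, T, P, P)          # (64, 3, 2, 16, 16)
embeddings = proj(as_patches).view(-1, D)       # kernel == stride -> one vector per patch
print(embeddings.shape)                         # torch.Size([64, 1152])
```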
+ class Qwen3VLVisionRotaryEmbedding(nn.Module):
+     inv_freq: torch.Tensor  # fix linting for `register_buffer`
+
+     def __init__(self, dim: int, theta: float = 10000.0) -> None:
+         super().__init__()
+         inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+     def forward(self, seqlen: int) -> torch.Tensor:
+         seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
+         freqs = torch.outer(seq, self.inv_freq)
+         return freqs
+
+
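The rotary table is the standard inverse-frequency construction: `forward(seqlen)` returns one row of rotation angles per position. A quick sketch of the shapes involved, with illustrative numbers (not read from a real config):

```python
import torch

dim, theta = 32, 10000.0  # illustrative
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
seq = torch.arange(16, dtype=inv_freq.dtype)
freqs = torch.outer(seq, inv_freq)  # one row of angles per position
print(inv_freq.shape, freqs.shape)  # torch.Size([16]) torch.Size([16, 16])
```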
+ class Qwen3VLVisionPatchMerger(nn.Module):
+     def __init__(self, config: Qwen3VLVisionConfig, use_postshuffle_norm=False) -> None:
+         super().__init__()
+         self.hidden_size = config.hidden_size * (config.spatial_merge_size**2)
+         self.use_postshuffle_norm = use_postshuffle_norm
+         self.norm = nn.LayerNorm(self.hidden_size if use_postshuffle_norm else config.hidden_size, eps=1e-6)
+         self.linear_fc1 = nn.Linear(self.hidden_size, self.hidden_size)
+         self.act_fn = nn.GELU()
+         self.linear_fc2 = nn.Linear(self.hidden_size, config.out_hidden_size)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         x = self.norm(x.view(-1, self.hidden_size) if self.use_postshuffle_norm else x).view(-1, self.hidden_size)
+         x = self.linear_fc2(self.act_fn(self.linear_fc1(x)))
+         return x
+
+
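The merger consumes patch embeddings that have been spatially shuffled into groups of `spatial_merge_size**2` neighbors, so the token count drops by that factor while the feature dimension grows by it before the projection to `out_hidden_size`. The arithmetic, with illustrative sizes (assumptions, not real config values):

```python
# Illustrative sizes only.
merge, hidden, out_hidden = 2, 1152, 2048
grid_h = grid_w = 32                       # patch grid of one image
tokens_in = grid_h * grid_w                # 1024 patch embeddings
tokens_out = tokens_in // (merge * merge)  # 256 merged tokens
fused_dim = hidden * merge * merge         # 4608-dim input to the merger MLP
print(tokens_in, "->", tokens_out, "tokens;", fused_dim, "->", out_hidden, "features")
```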
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ def apply_rotary_pos_emb_vision(
+     q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+     orig_q_dtype = q.dtype
+     orig_k_dtype = k.dtype
+     q, k = q.float(), k.float()
+     cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     q_embed = q_embed.to(orig_q_dtype)
+     k_embed = k_embed.to(orig_k_dtype)
+     return q_embed, k_embed
+
+
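A toy check of the two helpers above: build a matching `cos`/`sin` table and rotate a random query/key pair. Shapes follow the vision path, `(seq_len, num_heads, head_dim)` for `q`/`k` and `(seq_len, head_dim)` for `cos`/`sin` (broadcast over heads). Since each 2-D pair is rotated by a pure rotation, vector norms are preserved:

```python
import torch

seq_len, num_heads, head_dim = 8, 2, 16
q = torch.randn(seq_len, num_heads, head_dim)
k = torch.randn(seq_len, num_heads, head_dim)

# Toy angle table: position index as the angle for every frequency pair.
freqs = torch.outer(torch.arange(seq_len).float(), torch.ones(head_dim // 2))
emb = torch.cat((freqs, freqs), dim=-1)  # duplicate halves to cover head_dim
cos, sin = emb.cos(), emb.sin()

q_rot, k_rot = apply_rotary_pos_emb_vision(q, k, cos, sin)
print(q_rot.shape)  # torch.Size([8, 2, 16])
# A rotation preserves norms (up to float error):
print(torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-5))
```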
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """
+     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+     """
+     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+     if n_rep == 1:
+         return hidden_states
+     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
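`repeat_kv` is the grouped-query-attention expansion: every key/value head is tiled so the head count matches the queries. A shape sketch using the function above:

```python
import torch

# 8 query heads sharing 2 KV heads means each KV head is repeated 8 // 2 = 4 times.
batch, kv_heads, seq, head_dim = 1, 2, 6, 4
kv = torch.randn(batch, kv_heads, seq, head_dim)
expanded = repeat_kv(kv, n_rep=4)
print(expanded.shape)  # torch.Size([1, 8, 6, 4])
# Equivalent to torch.repeat_interleave on the head dimension:
print(torch.equal(expanded, torch.repeat_interleave(kv, 4, dim=1)))
```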
+ def eager_attention_forward(
+     module: nn.Module,
+     query: torch.Tensor,
+     key: torch.Tensor,
+     value: torch.Tensor,
+     attention_mask: Optional[torch.Tensor],
+     scaling: float,
+     dropout: float = 0.0,
+     **kwargs: Unpack[TransformersKwargs],
+ ):
+     key_states = repeat_kv(key, module.num_key_value_groups)
+     value_states = repeat_kv(value, module.num_key_value_groups)
+
+     attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+     if attention_mask is not None:
+         causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+         attn_weights = attn_weights + causal_mask
+
+     attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+     attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+     attn_output = torch.matmul(attn_weights, value_states)
+     attn_output = attn_output.transpose(1, 2).contiguous()
+
+     return attn_output, attn_weights
+
+
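A toy call of the eager path above. The function only reads `num_key_value_groups` and `training` off its `module` argument, so a `SimpleNamespace` can stand in for a real `nn.Module` in this sketch:

```python
import torch
from types import SimpleNamespace

module = SimpleNamespace(num_key_value_groups=4, training=False)
b, q_heads, kv_heads, seq, d = 1, 8, 2, 5, 16
q = torch.randn(b, q_heads, seq, d)
k = torch.randn(b, kv_heads, seq, d)
v = torch.randn(b, kv_heads, seq, d)

out, weights = eager_attention_forward(module, q, k, v, attention_mask=None, scaling=d**-0.5)
# Output comes back transposed to (batch, seq, heads, head_dim).
print(out.shape, weights.shape)  # torch.Size([1, 5, 8, 16]) torch.Size([1, 8, 5, 5])
```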
+ class Qwen3VLVisionAttention(nn.Module):
+     def __init__(self, config: Qwen3VLVisionConfig) -> None:
+         super().__init__()
+         self.dim = config.hidden_size
+         self.num_heads = config.num_heads
+         self.head_dim = self.dim // self.num_heads
+         self.num_key_value_groups = 1  # needed for eager attention
+         self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True)
+         self.proj = nn.Linear(self.dim, self.dim)
+         self.scaling = self.head_dim**-0.5
+         self.config = config
+         self.attention_dropout = 0.0
+         self.is_causal = False
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         cu_seqlens: torch.Tensor,
+         rotary_pos_emb: Optional[torch.Tensor] = None,
+         position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
+         **kwargs,
+     ) -> torch.Tensor:
+         seq_length = hidden_states.shape[0]
+         query_states, key_states, value_states = (
+             self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
+         )
+         cos, sin = position_embeddings
+         query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
+
+         query_states = query_states.transpose(0, 1).unsqueeze(0)
+         key_states = key_states.transpose(0, 1).unsqueeze(0)
+         value_states = value_states.transpose(0, 1).unsqueeze(0)
+
+         attention_interface: Callable = eager_attention_forward
+         if self.config._attn_implementation != "eager":
+             attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+         if self.config._attn_implementation == "flash_attention_2":
+             # Flash Attention 2: use cu_seqlens for variable-length attention
+             max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
+             attn_output, _ = attention_interface(
+                 self,
+                 query_states,
+                 key_states,
+                 value_states,
+                 attention_mask=None,
+                 scaling=self.scaling,
+                 dropout=0.0 if not self.training else self.attention_dropout,
+                 cu_seq_lens_q=cu_seqlens,
+                 cu_seq_lens_k=cu_seqlens,
+                 max_length_q=max_seqlen,
+                 max_length_k=max_seqlen,
+                 is_causal=False,
+                 **kwargs,
+             )
+         else:
+             # Other implementations: process each chunk separately
+             lengths = cu_seqlens[1:] - cu_seqlens[:-1]
+             splits = [
+                 torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)
+             ]
+
+             attn_outputs = [
+                 attention_interface(
+                     self,
+                     q,
+                     k,
+                     v,
+                     attention_mask=None,
+                     scaling=self.scaling,
+                     dropout=0.0 if not self.training else self.attention_dropout,
+                     is_causal=False,
+                     **kwargs,
+                 )[0]
+                 for q, k, v in zip(*splits)
+             ]
+             attn_output = torch.cat(attn_outputs, dim=1)
+
+         attn_output = attn_output.reshape(seq_length, -1).contiguous()
+         attn_output = self.proj(attn_output)
+         return attn_output
+
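+ # cu_seqlens sketch (illustrative): two images of 4 and 6 patches give
+ # cu_seqlens = [0, 4, 10]; per-image lengths are cu_seqlens[1:] - cu_seqlens[:-1] = [4, 6],
+ # so the non-flash branch above splits q/k/v with torch.split(..., [4, 6], dim=2) and
+ # runs one attention call per image before re-concatenating along the sequence dim.
+
+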
+ class Qwen3VLVisionBlock(GradientCheckpointingLayer):
+     def __init__(self, config, attn_implementation: str = "sdpa") -> None:
+         super().__init__()
+         self.norm1 = nn.LayerNorm(config.hidden_size, eps=1e-6)
+         self.norm2 = nn.LayerNorm(config.hidden_size, eps=1e-6)
+         self.attn = Qwen3VLVisionAttention(config=config)
+         self.mlp = Qwen3VLVisionMLP(config=config)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         cu_seqlens: torch.Tensor,
+         rotary_pos_emb: Optional[torch.Tensor] = None,
+         position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
+         **kwargs,
+     ) -> torch.Tensor:
+         hidden_states = hidden_states + self.attn(
+             self.norm1(hidden_states),
+             cu_seqlens=cu_seqlens,
+             rotary_pos_emb=rotary_pos_emb,
+             position_embeddings=position_embeddings,
+             **kwargs,
+         )
+         hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
+         return hidden_states
+
+
+ class Qwen3VLTextRotaryEmbedding(nn.Module):
+     inv_freq: torch.Tensor  # fix linting for `register_buffer`
+
+     def __init__(self, config: LimeQwen3VLTextConfig, device=None):
+         super().__init__()
+         if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+             self.rope_type = config.rope_scaling.get("rope_type", "default")
+         else:
+             self.rope_type = "default"
+         self.max_seq_len_cached = config.max_position_embeddings
+         self.original_max_seq_len = config.max_position_embeddings
+
+         self.config = config
+         self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+         inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         self.original_inv_freq = self.inv_freq
+
+         # Guard against configs without rope_scaling (mirrors the check above).
+         rope_scaling = config.rope_scaling if getattr(config, "rope_scaling", None) is not None else {}
+         self.mrope_section = rope_scaling.get("mrope_section", [24, 20, 20])
+
+     def apply_interleaved_mrope(self, freqs, mrope_section):
+         """Apply interleaved MRoPE to 3D rotary embeddings.
+         Reorganizes the frequency layout from chunked [TTT...HHH...WWW] to
+         interleaved [THTHWHTHW...TT], preserving frequency continuity.
+
+         Args:
+             freqs: (3, bs, seq_len, head_dim // 2)
+             mrope_section: (3,)
+         Returns:
+             freqs_t: (bs, seq_len, head_dim // 2)
+         """
+         freqs_t = freqs[0]  # start from the temporal (T) component and overwrite the H/W slots in place
+         for dim, offset in enumerate((1, 2), start=1):  # H, W
+             length = mrope_section[dim] * 3
+             idx = slice(offset, length, 3)
+             freqs_t[..., idx] = freqs[dim, ..., idx]
+         return freqs_t
+
+     @torch.no_grad()
+     @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
+     def forward(self, x, position_ids):
+         # In contrast to other models, Qwen3VL has different position ids for the grids,
+         # so we expand inv_freq to shape (3, ...)
+         if position_ids.ndim == 2:
+             position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
+         inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
+         position_ids_expanded = position_ids[:, :, None, :].float()  # shape (3, bs, 1, positions)
+
+         device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+         with torch.autocast(device_type=device_type, enabled=False):  # force float32
+             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
+             freqs = self.apply_interleaved_mrope(freqs, self.mrope_section)
+             emb = torch.cat((freqs, freqs), dim=-1)
+             cos = emb.cos() * self.attention_scaling
+             sin = emb.sin() * self.attention_scaling
+
+         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
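+ # Index sketch for the interleaving above (hypothetical mrope_section = [2, 1, 1],
+ # i.e. head_dim // 2 = 4 frequency slots): H overwrites slice(1, 3, 3) = slot 1 and
+ # W overwrites slice(2, 3, 3) = slot 2, while slots 0 and 3 keep their temporal
+ # frequencies, giving the layout [T, H, W, T].
+
+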
+ @use_kernel_forward_from_hub("RMSNorm")
+ class Qwen3VLTextRMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps: float = 1e-6) -> None:
+         """
+         Qwen3VLTextRMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+     def extra_repr(self):
+         return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+
+
+ class LimeSimpleRMSNorm(nn.Module):
+     """
+     Affine-less RMSNorm.
+     No learnable weight, no bias. Just pure normalization.
+     """
+
+     def __init__(self, hidden_size=None, eps: float = 1e-6) -> None:
+         super().__init__()
+         # The hidden_size argument is unused here, but kept for interface consistency.
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return hidden_states.to(input_dtype)
+
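+ # Equivalence sketch (illustrative): at initialization Qwen3VLTextRMSNorm's weight is
+ # all ones, so the two norms agree; they differ only by the final elementwise
+ # `weight *` multiply:
+ #   x = torch.randn(2, 3, 16)
+ #   torch.allclose(LimeSimpleRMSNorm()(x), Qwen3VLTextRMSNorm(16)(x))  # True at init
+
+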
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+     """Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         position_ids (`torch.Tensor`, *optional*):
+             Deprecated and unused.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
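+ # Broadcast sketch (illustrative sizes; `rotate_half` is referenced above and
+ # assumed to be defined earlier in this module, as in standard transformers files):
+ #   q = k = torch.randn(1, 8, 5, 64)   # (batch, heads, seq_len, head_dim)
+ #   cos = sin = torch.randn(1, 5, 64)  # (batch, seq_len, head_dim)
+ #   q2, k2 = apply_rotary_pos_emb(q, k, cos, sin)  # unsqueeze_dim=1 -> cos/sin view (1, 1, 5, 64)
+
+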
+ class Qwen3VLTextAttention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: LimeQwen3VLTextConfig, layer_idx: int):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
+         self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+         self.scaling = self.head_dim**-0.5
+         self.attention_dropout = config.attention_dropout
+         self.is_causal = True
+
+         self.q_proj = nn.Linear(
+             config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
+         )
+         self.k_proj = nn.Linear(
+             config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
+         )
+         self.v_proj = nn.Linear(
+             config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
+         )
+         self.o_proj = nn.Linear(
+             config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
+         )
+         self.q_norm = Qwen3VLTextRMSNorm(self.head_dim, eps=config.rms_norm_eps)  # unlike olmo, only on the head dim!
+         self.k_norm = Qwen3VLTextRMSNorm(
+             self.head_dim, eps=config.rms_norm_eps
+         )  # thus post q_norm does not need reshape
+
+     @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: tuple[torch.Tensor, torch.Tensor],
+         attention_mask: Optional[torch.Tensor],
+         past_key_values: Optional[Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **kwargs: Unpack[FlashAttentionKwargs],
+     ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
+         input_shape = hidden_states.shape[:-1]
+         hidden_shape = (*input_shape, -1, self.head_dim)
+
+         query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
+         key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
+         value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
+
+         cos, sin = position_embeddings
+         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+         if past_key_values is not None:
+             # sin and cos are specific to RoPE models; cache_position needed for the static cache
+             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+             key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+         attention_interface: Callable = eager_attention_forward
+         if self.config._attn_implementation != "eager":
+             attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+         attn_output, attn_weights = attention_interface(
+             self,
+             query_states,
+             key_states,
+             value_states,
+             attention_mask,
+             dropout=0.0 if not self.training else self.attention_dropout,
+             scaling=self.scaling,
+             **kwargs,
+         )
+
+         attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+         attn_output = self.o_proj(attn_output)
+         return attn_output, attn_weights
+
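+ # Note on the per-head QK-norm above: q_norm/k_norm are RMSNorms over head_dim only,
+ # applied to the (batch, seq, heads, head_dim) view *before* the transpose. With
+ # hypothetical sizes, a q_proj output of (1, 5, 2048) is viewed as (1, 5, 16, 128),
+ # normalized over the last 128 dims, then transposed to (1, 16, 5, 128).
+
+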
+ class Qwen3VLTextMLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, x):
+         down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+         return down_proj
+
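+ # SwiGLU sketch: with SiLU as the configured hidden_act, the forward computes
+ #   down_proj( silu(gate_proj(x)) * up_proj(x) ),
+ # i.e. the gate branch modulates the up branch elementwise before projecting back
+ # down to hidden_size.
+
+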
+ class LimeCrossAttention(nn.Module):
+     """
+     Lime Cross Attention: mimics the Qwen3VLTextAttention structure but operates
+     within the reduced 'lime_hidden_size' dimension.
+
+     Key architectural features:
+     1. Source: the query comes from text (hidden_states); keys/values come from vision (visual_context).
+     2. No RoPE: cross-attention relies on content retrieval; positional embeddings are distinct.
+     3. Dimensionality: operates in the bottleneck dimension (lime_hidden_size) while preserving
+        the original attention head resolution (head_dim).
+     """
+
+     def __init__(self, config: LimeQwen3VLTextConfig, layer_idx: int):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+
+         # --- 1. Dimension Configuration ---
+         # Use the reduced dimension (bottleneck size) defined in the config.
+         self.hidden_size = config.lime_hidden_size
+
+         # --- 2. Head Calculation ---
+         # Preserve the original 'head_dim' (e.g., 128) to maintain the granularity of
+         # attention features. We adjust 'num_heads' to fit the reduced hidden_size.
+         original_head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
+         self.head_dim = original_head_dim
+
+         # Ensure the reduced dimension is valid for the given head size.
+         if self.hidden_size % self.head_dim != 0:
+             raise ValueError(
+                 f"config.lime_hidden_size ({self.hidden_size}) must be divisible by head_dim ({self.head_dim})"
+             )
+
+         self.num_heads = self.hidden_size // self.head_dim
+
+         # --- 3. Grouped Query Attention (GQA) Adjustment ---
+         # Maintain the key/value head ratio from the original model.
+         # If the dimension is too small, ensure at least one KV head exists.
+         original_gqa_ratio = config.num_attention_heads // config.num_key_value_heads
+         self.num_key_value_heads = max(1, self.num_heads // original_gqa_ratio)
+         self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+
+         self.scaling = self.head_dim**-0.5
+         self.attention_dropout = config.attention_dropout
+
+         # --- 4. Projection Layers ---
+         # Projections operate strictly within the reduced 'lime_hidden_size'.
+         self.q_proj = nn.Linear(
+             self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias
+         )
+         self.k_proj = nn.Linear(
+             self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias
+         )
+         self.v_proj = nn.Linear(
+             self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias
+         )
+         self.o_proj = nn.Linear(
+             self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias
+         )
+
+         # Norm layers applied to the specific head dimensions.
+         self.q_norm = Qwen3VLTextRMSNorm(self.head_dim, eps=config.rms_norm_eps)
+         self.k_norm = Qwen3VLTextRMSNorm(self.head_dim, eps=config.rms_norm_eps)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,  # query (text), in lime_hidden_size
+         visual_context: torch.Tensor,  # key/value (vision), in lime_hidden_size
+         attention_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         # 1. Query Processing (Text Side)
+         input_shape = hidden_states.shape[:-1]
+         # Reshape to (batch, seq_len, num_heads, head_dim)
+         query_shape = (*input_shape, -1, self.head_dim)
+
+         # Standard Qwen normalization: Norm(Proj(x).view).transpose
+         query_states = self.q_norm(self.q_proj(hidden_states).view(query_shape)).transpose(1, 2)
+
+         # 2. Key/Value Processing (Vision Side)
+         visual_shape = visual_context.shape[:-1]  # (batch, vis_len)
+         kv_shape = (*visual_shape, -1, self.head_dim)
+
+         # Apply projections and norms for GQA
+         key_states = self.k_norm(self.k_proj(visual_context).view(kv_shape)).transpose(1, 2)
+         value_states = self.v_proj(visual_context).view(kv_shape).transpose(1, 2)
+
+         # 3. Repeat KV for GQA
+         # Explicitly repeat keys/values to match the number of query heads
+         key_states = repeat_kv(key_states, self.num_key_value_groups)
+         value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+         # 4. Attention Calculation
+         # Cross-modal retrieval (text-to-vision).
+         # Shape: (batch, num_heads, text_len, vis_len)
+         attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling
+
+         if attention_mask is not None:
+             # Apply the visual mask (usually broadcast over batch/heads)
+             attn_weights = attn_weights + attention_mask
+
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+         attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
+
+         attn_output = torch.matmul(attn_weights, value_states)
+
+         # 5. Output Projection
+         attn_output = attn_output.transpose(1, 2).contiguous()
+         attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+         attn_output = self.o_proj(attn_output)
+
+         return attn_output
+
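+ # Head-count sketch (hypothetical config values): with hidden_size=2560, head_dim=128,
+ # num_attention_heads=32, num_key_value_heads=8 and lime_hidden_size=1024, this block
+ # gets num_heads = 1024 // 128 = 8 query heads and, preserving the 32:8 = 4:1 GQA
+ # ratio, num_key_value_heads = max(1, 8 // 4) = 2, hence num_key_value_groups = 4.
+
+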
+ class LimeMLP(nn.Module):
+     """
+     Lime MLP: a SwiGLU MLP adapted for the bottleneck dimension.
+     """
+
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+
+         # Operate in the reduced dimension
+         self.hidden_size = config.lime_hidden_size
+
+         # Calculate the intermediate size dynamically to maintain the original model's
+         # expansion ratio (typically around 3.5x-4x).
+         expansion_ratio = config.intermediate_size / config.hidden_size
+         self.intermediate_size = int(self.hidden_size * expansion_ratio)
+
+         # Gate, up, and down projections without bias (following Qwen conventions)
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, x):
+         # SwiGLU activation mechanism
+         down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+         return down_proj
+
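+ # Arithmetic sketch (hypothetical config values): with config.hidden_size=2560 and
+ # config.intermediate_size=9728, the expansion ratio is 9728 / 2560 = 3.8, so a
+ # lime_hidden_size of 1024 yields intermediate_size = int(1024 * 3.8) = 3891.
+
+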
+ class LimeMemoryBlock(nn.Module):
+     """
+     Lime Memory Block (bottleneck adapter architecture).
+
+     Flow:
+     Input (High-Dim) -> Down-Projection -> [Cross-Attn -> MLP] (Low-Dim) -> Up-Projection -> Gated Residual
+
+     This design reduces parameter count and computation by performing dense operations
+     in a compressed latent space ('lime_hidden_size').
+     """
+
+     def __init__(self, config, layer_idx):
+         super().__init__()
+
+         # 1. Dimension Definitions
+         self.orig_hidden_size = config.hidden_size
+         self.lime_dim = config.lime_hidden_size  # the bottleneck dimension (e.g., 1024)
+
+         # 2. Down-Projection Layers (Compression)
+         # Project text and vision features from the original dimension to the bottleneck dimension.
+         self.text_reducer = nn.Linear(self.orig_hidden_size, self.lime_dim, bias=False)
+         self.vision_reducer = nn.Linear(self.orig_hidden_size, self.lime_dim, bias=False)
+
+         # 3. Up-Projection Layer (Restoration)
+         # Project features back to the original dimension for the residual addition.
+         self.output_expander = nn.Linear(self.lime_dim, self.orig_hidden_size, bias=False)
+
+         # 4. Internal Components (Bottleneck Space)
+         # Norms and core layers operate in 'lime_dim'.
+         self.input_norm = Qwen3VLTextRMSNorm(self.lime_dim, eps=config.rms_norm_eps)
+         self.post_attn_norm = Qwen3VLTextRMSNorm(self.lime_dim, eps=config.rms_norm_eps)
+
+         # These modules automatically use config.lime_hidden_size internally
+         self.cross_attn = LimeCrossAttention(config, layer_idx)
+         self.mlp = LimeMLP(config)
+
+         # --- Modification: use a parameter-free RMSNorm ---
+         # Even if a hidden_size were passed in, this class ignores it and creates no Parameter.
+         self.output_norm = LimeSimpleRMSNorm(eps=config.rms_norm_eps)
+
+         # Learnable gating parameter
+         self.gate_alpha = nn.Parameter(torch.zeros(1))
+
+         # 5. Zero-Initialization Strategy
+         # Initialize the final output projection to zero. This ensures the module
+         # acts as an identity function (output=0) at the start of training,
+         # preventing disruption to the pre-trained model's features.
+         nn.init.zeros_(self.output_expander.weight)
+
+     def forward(self, hidden_states, visual_context, visual_mask=None, query_is_visual_mask=None):
+         """
+         Args:
+             hidden_states: text context [Batch, SeqLen, Orig_Dim]
+             visual_context: visual memory [Batch, VisLen, Orig_Dim]
+             visual_mask: attention mask for the visual memory.
+             query_is_visual_mask: mask used to silence injection on visual query tokens.
+         """
+         # --- Short-circuit guard ---
+         # If there is no visual context (or it is empty), skip all computation and
+         # return zeros, leaving the residual stream untouched.
+         if visual_context is None or visual_context.numel() == 0:
+             return torch.zeros_like(hidden_states)
+         # ----------------------------
+
+         # 1. Down-Projection (High Rank -> Low Rank)
+         # Shapes become [Batch, ..., Lime_Dim]
+         small_hidden = self.text_reducer(hidden_states)
+         small_visual = self.vision_reducer(visual_context)
+
+         # 2. Cross Attention (Low Rank)
+         normed_hidden = self.input_norm(small_hidden)
+         # Pass the compressed visual context to attention
+         m_raw = self.cross_attn(normed_hidden, small_visual, attention_mask=visual_mask)
+
+         # 3. MLP Generation (Low Rank)
+         normed_m = self.post_attn_norm(m_raw)
+         m_latent = m_raw + self.mlp(normed_m)
+
+         # 4. Up-Projection (Low Rank -> High Rank)
+         # Restore shape to [Batch, SeqLen, Orig_Dim]
+         m_restored = self.output_expander(m_latent)
+
+         # 5. Gated Injection
+         # Apply the learnable gate to the restored signal
+         m_normalized = self.output_norm(m_restored)
+         output = self.gate_alpha * m_normalized
+
+         # 6. Vision Token Silencing
+         # If the current query token is a visual token (indicated by the mask),
+         # suppress the output to zero to avoid contaminating visual representations.
+         if query_is_visual_mask is not None:
+             # Expand the mask to match the feature dimension: [Batch, SeqLen, 1]
+             silence_mask = 1.0 - query_is_visual_mask.to(output.dtype).unsqueeze(-1)
+             output = output * silence_mask
+
+         return output
+
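+ # Identity-at-init sketch: output_expander.weight is zero-initialized and gate_alpha
+ # starts at 0, so the forward returns exactly zeros at the start of training and
+ # `hidden_states + lime_update` leaves the pre-trained features untouched until the
+ # expander and gate learn non-zero values; the short-circuit guard gives the same
+ # zero update when no visual context is present.
+
+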
+ class Qwen3VLTextDecoderLayer(GradientCheckpointingLayer):
+     def __init__(self, config: LimeQwen3VLTextConfig, layer_idx: int):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+
+         self.self_attn = Qwen3VLTextAttention(config=config, layer_idx=layer_idx)
+
+         self.mlp = Qwen3VLTextMLP(config)
+         self.input_layernorm = Qwen3VLTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.post_attention_layernorm = Qwen3VLTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+         # --- LIME MODIFICATION ---
+         self.lime_block = None
+         lime_layers = getattr(config, "lime_layers", None)
+
+         if lime_layers is not None and layer_idx in lime_layers:
+             self.lime_block = LimeMemoryBlock(config, layer_idx)
+         # -------------------------
+
+     @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: tuple[torch.Tensor, torch.Tensor],
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         use_cache: Optional[bool] = False,
+         cache_position: Optional[torch.LongTensor] = None,
+         # --- 🚨 LIME ARGUMENTS 🚨 ---
+         lime_visual_context: Optional[torch.Tensor] = None,
+         lime_visual_mask: Optional[torch.Tensor] = None,
+         visual_pos_masks: Optional[torch.Tensor] = None,  # reusing the DeepStack mask: True=visual, False=text
+         # ----------------------------
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> torch.Tensor:
+         residual = hidden_states
+         hidden_states = self.input_layernorm(hidden_states)
+         # Self Attention
+         hidden_states, _ = self.self_attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             cache_position=cache_position,
+             position_embeddings=position_embeddings,
+             **kwargs,
+         )
+         hidden_states = residual + hidden_states
+
+         # Fully Connected
+         residual = hidden_states
+         hidden_states = self.post_attention_layernorm(hidden_states)
+         hidden_states = self.mlp(hidden_states)
+
+         # --- 🚨 LIME INJECTION START 🚨 ---
+         if self.lime_block is not None and lime_visual_context is not None:
+             lime_update = self.lime_block(
+                 hidden_states=residual,
+                 visual_context=lime_visual_context,
+                 visual_mask=lime_visual_mask,
+                 query_is_visual_mask=visual_pos_masks,  # pass the mask here
+             )
+             hidden_states = hidden_states + lime_update
+         # --- 🚨 LIME INJECTION END 🚨 ---
+
+         hidden_states = residual + hidden_states
+         return hidden_states
+
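+ # Config sketch (hypothetical attribute values): setting, e.g.,
+ # config.lime_layers = [8, 16, 24] attaches a LimeMemoryBlock to those decoder
+ # layers only; every other layer keeps self.lime_block = None and behaves as a
+ # vanilla Qwen3VL decoder layer, since the injection branch above is skipped.
+
+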
+ @dataclass
+ @auto_docstring(
+     custom_intro="""
+     Base class for Qwen3VL outputs, with hidden states and attentions.
+     """
+ )
+ class Qwen3VLModelOutputWithPast(ModelOutput):
+     r"""
+     past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+         It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
+
+         Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+         `past_key_values` input) to speed up sequential decoding.
+     rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+         The rope index difference between sequence length and multimodal rope.
+     """
+
+     last_hidden_state: Optional[torch.FloatTensor] = None
+     past_key_values: Optional[Cache] = None
+     hidden_states: Optional[tuple[torch.FloatTensor]] = None
+     attentions: Optional[tuple[torch.FloatTensor]] = None
+     rope_deltas: Optional[torch.LongTensor] = None
+
+
+ @auto_docstring
+ class Qwen3VLPreTrainedModel(PreTrainedModel):
+     config: LimeQwen3VLConfig
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["Qwen3VLTextDecoderLayer", "Qwen3VLVisionBlock"]
+     _skip_keys_device_placement = "past_key_values"
+     _supports_flash_attn = True
+     _supports_sdpa = True
+
+     _can_compile_fullgraph = True
+     _supports_attention_backend = True
+     _can_record_outputs = {
+         "hidden_states": Qwen3VLTextDecoderLayer,
+         "attentions": Qwen3VLTextAttention,
+     }
+
+
+ class Qwen3VLVisionModel(Qwen3VLPreTrainedModel):
+     config: Qwen3VLVisionConfig
+     _no_split_modules = ["Qwen3VLVisionBlock"]
+
+     def __init__(self, config, *inputs, **kwargs) -> None:
+         super().__init__(config, *inputs, **kwargs)
+         self.spatial_merge_size = config.spatial_merge_size
+         self.patch_size = config.patch_size
+         self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size
+
+         self.patch_embed = Qwen3VLVisionPatchEmbed(
+             config=config,
+         )
+
+         self.pos_embed = nn.Embedding(config.num_position_embeddings, config.hidden_size)
+         self.num_grid_per_side = int(config.num_position_embeddings**0.5)
+
+         head_dim = config.hidden_size // config.num_heads
+         self.rotary_pos_emb = Qwen3VLVisionRotaryEmbedding(head_dim // 2)
+
+         self.blocks = nn.ModuleList([Qwen3VLVisionBlock(config) for _ in range(config.depth)])
+         self.merger = Qwen3VLVisionPatchMerger(
+             config=config,
+             use_postshuffle_norm=False,
+         )
+
+         self.deepstack_visual_indexes = config.deepstack_visual_indexes
+         self.deepstack_merger_list = nn.ModuleList(
+             [
+                 Qwen3VLVisionPatchMerger(
+                     config=config,
+                     use_postshuffle_norm=True,
+                 )
+                 for _ in range(len(config.deepstack_visual_indexes))
+             ]
+         )
+
+         self.gradient_checkpointing = False
+
+     def rot_pos_emb(self, grid_thw: torch.Tensor) -> torch.Tensor:
+         merge_size = self.spatial_merge_size
+
+         max_hw = int(grid_thw[:, 1:].max().item())
+         freq_table = self.rotary_pos_emb(max_hw)  # (max_hw, dim // 2)
+         device = freq_table.device
+
+         total_tokens = int(torch.prod(grid_thw, dim=1).sum().item())
+         pos_ids = torch.empty((total_tokens, 2), dtype=torch.long, device=device)
+
+         offset = 0
+         for num_frames, height, width in grid_thw:
+             merged_h, merged_w = height // merge_size, width // merge_size
+
+             block_rows = torch.arange(merged_h, device=device)  # block row indices
+             block_cols = torch.arange(merged_w, device=device)  # block col indices
+             intra_row = torch.arange(merge_size, device=device)  # intra-block row offsets
+             intra_col = torch.arange(merge_size, device=device)  # intra-block col offsets
+
+             # Compute full-resolution positions
+             row_idx = block_rows[:, None, None, None] * merge_size + intra_row[None, None, :, None]
+             col_idx = block_cols[None, :, None, None] * merge_size + intra_col[None, None, None, :]
+
+             row_idx = row_idx.expand(merged_h, merged_w, merge_size, merge_size).reshape(-1)
+             col_idx = col_idx.expand(merged_h, merged_w, merge_size, merge_size).reshape(-1)
+
+             coords = torch.stack((row_idx, col_idx), dim=-1)
+
+             if num_frames > 1:
+                 coords = coords.repeat(num_frames, 1)
+
+             num_tokens = coords.shape[0]
+             pos_ids[offset : offset + num_tokens] = coords
+             offset += num_tokens
+
+         embeddings = freq_table[pos_ids]  # lookup rotary embeddings
+         embeddings = embeddings.flatten(1)
+         return embeddings
+
+     def fast_pos_embed_interpolate(self, grid_thw):
+         grid_ts, grid_hs, grid_ws = grid_thw[:, 0], grid_thw[:, 1], grid_thw[:, 2]
+
+         idx_list = [[] for _ in range(4)]
+         weight_list = [[] for _ in range(4)]
+
+         for t, h, w in zip(grid_ts, grid_hs, grid_ws):
+             h_idxs = torch.linspace(0, self.num_grid_per_side - 1, h)
+             w_idxs = torch.linspace(0, self.num_grid_per_side - 1, w)
+
+             h_idxs_floor = h_idxs.int()
+             w_idxs_floor = w_idxs.int()
+             h_idxs_ceil = (h_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)
+             w_idxs_ceil = (w_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)
+
+             dh = h_idxs - h_idxs_floor
+             dw = w_idxs - w_idxs_floor
+
+             base_h = h_idxs_floor * self.num_grid_per_side
+             base_h_ceil = h_idxs_ceil * self.num_grid_per_side
+
+             indices = [
+                 (base_h[None].T + w_idxs_floor[None]).flatten(),
+                 (base_h[None].T + w_idxs_ceil[None]).flatten(),
+                 (base_h_ceil[None].T + w_idxs_floor[None]).flatten(),
+                 (base_h_ceil[None].T + w_idxs_ceil[None]).flatten(),
+             ]
+
+             weights = [
+                 ((1 - dh)[None].T * (1 - dw)[None]).flatten(),
+                 ((1 - dh)[None].T * dw[None]).flatten(),
+                 (dh[None].T * (1 - dw)[None]).flatten(),
+                 (dh[None].T * dw[None]).flatten(),
+             ]
+
+             for i in range(4):
+                 idx_list[i].extend(indices[i].tolist())
+                 weight_list[i].extend(weights[i].tolist())
+
+         idx_tensor = torch.tensor(idx_list, dtype=torch.long, device=self.pos_embed.weight.device)
+         weight_tensor = torch.tensor(
+             weight_list, dtype=self.pos_embed.weight.dtype, device=self.pos_embed.weight.device
+         )
+         pos_embeds = self.pos_embed(idx_tensor) * weight_tensor[:, :, None]
+         patch_pos_embeds = pos_embeds[0] + pos_embeds[1] + pos_embeds[2] + pos_embeds[3]
+
+         patch_pos_embeds = patch_pos_embeds.split([h * w for h, w in zip(grid_hs, grid_ws)])
+
+         patch_pos_embeds_permute = []
+         merge_size = self.config.spatial_merge_size
+         for pos_embed, t, h, w in zip(patch_pos_embeds, grid_ts, grid_hs, grid_ws):
+             pos_embed = pos_embed.repeat(t, 1)
+             pos_embed = (
+                 pos_embed.view(t, h // merge_size, merge_size, w // merge_size, merge_size, -1)
+                 .permute(0, 1, 3, 2, 4, 5)
+                 .flatten(0, 4)
+             )
+             patch_pos_embeds_permute.append(pos_embed)
+         patch_pos_embeds = torch.cat(patch_pos_embeds_permute)
+         return patch_pos_embeds
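+
+     # Bilinear weight sketch: a fractional grid coordinate such as h_idx = 2.25 gives
+     # dh = 0.25, so the four corner weights above are (1-dh)(1-dw), (1-dh)dw,
+     # dh(1-dw) and dh*dw; each target position mixes its four nearest learned
+     # position embeddings, and the weights always sum to 1.
+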
+     def forward(
+         self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs
+     ) -> tuple[torch.Tensor, list[torch.Tensor]]:
+         """
+         Args:
+             hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
+                 The packed, patchified pixel inputs to be embedded by `patch_embed`.
+             grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
+                 The temporal, height and width of feature shape of each image in LLM.
+
+         Returns:
+             `tuple(torch.Tensor, list[torch.Tensor])`: the merged hidden states and the deepstack feature list.
+         """
+         hidden_states = self.patch_embed(hidden_states)
+
+         pos_embeds = self.fast_pos_embed_interpolate(grid_thw)
+         hidden_states = hidden_states + pos_embeds
+
+         rotary_pos_emb = self.rot_pos_emb(grid_thw)
+
+         seq_len, _ = hidden_states.size()
+         hidden_states = hidden_states.reshape(seq_len, -1)
+         rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
+         emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
+         position_embeddings = (emb.cos(), emb.sin())
+
+         cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
+             dim=0,
+             # Select dtype based on the following factors:
+             #  - FA2 requires that cu_seqlens_q must have dtype int32
+             #  - torch.onnx.export requires that cu_seqlens_q has the same dtype as grid_thw
+             # See https://github.com/huggingface/transformers/pull/34852 for more information
+             dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
+         )
+         cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
+
+         deepstack_feature_lists = []
+         for layer_num, blk in enumerate(self.blocks):
+             hidden_states = blk(
+                 hidden_states,
+                 cu_seqlens=cu_seqlens,
+                 position_embeddings=position_embeddings,
+                 **kwargs,
+             )
+             if layer_num in self.deepstack_visual_indexes:
+                 deepstack_feature = self.deepstack_merger_list[self.deepstack_visual_indexes.index(layer_num)](
+                     hidden_states
+                 )
+                 deepstack_feature_lists.append(deepstack_feature)
+
+         hidden_states = self.merger(hidden_states)
+
+         return hidden_states, deepstack_feature_lists
+
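+ # cu_seqlens construction sketch (illustrative): grid_thw = [[2, 4, 6]] (one video,
+ # 2 frames of 4x6 patches) gives per-frame lengths repeat_interleave(24, 2) = [24, 24],
+ # cumsum = [24, 48], and the F.pad prepends 0 -> cu_seqlens = [0, 24, 48], i.e. one
+ # attention chunk per frame.
+
+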
+ @auto_docstring(
+     custom_intro=(
+         "Text part of Qwen3VL, "
+         "not a pure text-only model, as DeepStack integrates visual features into the early hidden states."
+     )
+ )
+ class Qwen3VLTextModel(Qwen3VLPreTrainedModel):
+     config: LimeQwen3VLTextConfig
+     _no_split_modules = ["Qwen3VLTextDecoderLayer"]
+
+     def __init__(self, config: LimeQwen3VLTextConfig):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList(
+             [Qwen3VLTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+         )
+         self.norm = Qwen3VLTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.rotary_emb = Qwen3VLTextRotaryEmbedding(config=config)
+         self.gradient_checkpointing = False
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @check_model_inputs()
+     @auto_docstring
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         # args for deepstack
+         visual_pos_masks: Optional[torch.Tensor] = None,
+         deepstack_visual_embeds: Optional[list[torch.Tensor]] = None,
+         # --- 🚨 LIME ARGUMENT START 🚨 ---
+         lime_visual_context: Optional[torch.Tensor] = None,
+         lime_visual_mask: Optional[torch.Tensor] = None,
+         # --- 🚨 LIME ARGUMENT END 🚨 ---
+         **kwargs: Unpack[FlashAttentionKwargs],
+     ) -> Union[tuple, BaseModelOutputWithPast]:
+         r"""
+         visual_pos_masks (`torch.Tensor` of shape `(batch_size, seqlen)`, *optional*):
+             The mask of the visual positions.
+         deepstack_visual_embeds (`list[torch.Tensor]`, *optional*):
+             The deepstack visual embeddings. The shape is (num_layers, visual_seqlen, embed_dim).
+             The features are extracted from different visual encoder layers and fed into the decoder
+             hidden states, following the paper DeepStack (https://arxiv.org/abs/2406.04334).
+         lime_visual_context (`torch.Tensor` of shape `(batch_size, vis_seqlen, hidden_size)`, *optional*):
+             The visual context for Lime cross-attention.
+         lime_visual_mask (`torch.Tensor` of shape `(batch_size, 1, 1, vis_seqlen)`, *optional*):
+             The attention mask for Lime cross-attention.
+         """
+         if (input_ids is None) ^ (inputs_embeds is not None):
+             raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+         # torch.jit.trace() doesn't support cache objects in the output
+         if use_cache and past_key_values is None and not torch.jit.is_tracing():
+             past_key_values = DynamicCache(config=self.config)
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embed_tokens(input_ids)
+
+         if cache_position is None:
+             past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+             cache_position = torch.arange(
+                 past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+             )
+
+         # the hard-coded `3` is for temporal, height and width.
+         if position_ids is None:
+             position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
+         elif position_ids.ndim == 2:
+             position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
+
+         if position_ids.ndim == 3 and position_ids.shape[0] == 4:
+             text_position_ids = position_ids[0]
+             position_ids = position_ids[1:]
+         else:
+             text_position_ids = position_ids[0]
+
+         attention_mask = create_causal_mask(
+             config=self.config,
+             input_embeds=inputs_embeds,
+             attention_mask=attention_mask,
+             cache_position=cache_position,
+             past_key_values=past_key_values,
+             position_ids=text_position_ids,
+         )
+
+         hidden_states = inputs_embeds
+
+         # create position embeddings to be shared across the decoder layers
+         position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+         # decoder layers
+         for layer_idx, decoder_layer in enumerate(self.layers):
+             layer_outputs = decoder_layer(
+                 hidden_states,
+                 attention_mask=attention_mask,
+                 position_ids=text_position_ids,
+                 past_key_values=past_key_values,
+                 cache_position=cache_position,
+                 position_embeddings=position_embeddings,
+                 # --- 🚨 pass the LIME arguments through 🚨 ---
+                 lime_visual_context=lime_visual_context,
+                 lime_visual_mask=lime_visual_mask,
+                 # pass visual_pos_masks to ALL layers for Lime silencing
+                 visual_pos_masks=visual_pos_masks,
+                 # -------------------------
+                 **kwargs,
+             )
+             hidden_states = layer_outputs
+
+             # add visual features to the hidden states of the first several layers
+             if deepstack_visual_embeds is not None and layer_idx in range(len(deepstack_visual_embeds)):
+                 hidden_states = self._deepstack_process(
+                     hidden_states,
+                     visual_pos_masks,
+                     deepstack_visual_embeds[layer_idx],
+                 )
+
+         hidden_states = self.norm(hidden_states)
+
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=past_key_values,
+         )
+
+     def _deepstack_process(
+         self, hidden_states: torch.Tensor, visual_pos_masks: torch.Tensor, visual_embeds: torch.Tensor
+     ):
+         visual_pos_masks = visual_pos_masks.to(hidden_states.device)
+         visual_embeds = visual_embeds.to(hidden_states.device, hidden_states.dtype)
+         local_this = hidden_states[visual_pos_masks, :].clone() + visual_embeds
+         hidden_states[visual_pos_masks, :] = local_this
+         return hidden_states
+
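+ # DeepStack injection sketch: visual_pos_masks flags visual token positions in the
+ # (batch, seqlen) grid, so hidden_states[visual_pos_masks, :] is a
+ # (num_visual_tokens, hidden) view aligned row-for-row with visual_embeds; the
+ # clone-add-assign pattern avoids an in-place add on a tensor autograd still needs.
+
+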
+ @auto_docstring
+ class LimeQwen3VLModel(Qwen3VLPreTrainedModel):
+     base_model_prefix = ""
+     _checkpoint_conversion_mapping = {}
+     # Reference: fix gemma3 grad acc #37208
+     accepts_loss_kwargs = False
+     config: LimeQwen3VLConfig
+     _no_split_modules = ["Qwen3VLTextDecoderLayer", "Qwen3VLVisionBlock"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.visual = Qwen3VLVisionModel._from_config(config.vision_config)
+         self.language_model = Qwen3VLTextModel._from_config(config.text_config)
+         self.rope_deltas = None  # cache rope_deltas here
+
+         # --- 🚨 LIME STATE CACHE 🚨 ---
+         self.lime_visual_context_cache = None  # new: cache for the visual memory
+         self.lime_visual_mask_cache = None  # new: cache for the visual mask
+         # -----------------------------
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.language_model.get_input_embeddings()
+
+     def set_input_embeddings(self, value):
+         self.language_model.set_input_embeddings(value)
+
+     def set_decoder(self, decoder):
+         self.language_model = decoder
+
+     def get_decoder(self):
+         return self.language_model
+
+     def get_rope_index(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         image_grid_thw: Optional[torch.LongTensor] = None,
+         video_grid_thw: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+     ) -> tuple[torch.Tensor, torch.Tensor]:
+         """Different from the original implementation, Qwen3VL uses timestamps rather than absolute time position ids."""
+
+         # Since we use timestamps to separate videos, like <t1> <vision_start> <frame1> <vision_end> <t2> <vision_start> <frame2> <vision_end>,
+         # the video_grid_thw should also be split
+         if video_grid_thw is not None:
+             video_grid_thw = torch.repeat_interleave(video_grid_thw, video_grid_thw[:, 0], dim=0)
+             video_grid_thw[:, 0] = 1
+
+         spatial_merge_size = self.config.vision_config.spatial_merge_size
+         image_token_id = self.config.image_token_id
+         video_token_id = self.config.video_token_id
+         vision_start_token_id = self.config.vision_start_token_id
+         mrope_position_deltas = []
+         if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
+             total_input_ids = input_ids
+             if attention_mask is None:
+                 attention_mask = torch.ones_like(total_input_ids)
+             position_ids = torch.ones(
+                 3,
+                 input_ids.shape[0],
+                 input_ids.shape[1],
+                 dtype=input_ids.dtype,
+                 device=input_ids.device,
+             )
+             image_index, video_index = 0, 0
+             attention_mask = attention_mask.to(total_input_ids.device)
+             for i, input_ids in enumerate(total_input_ids):
+                 input_ids = input_ids[attention_mask[i] == 1]
+                 image_nums, video_nums = 0, 0
+                 vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1)
+                 vision_tokens = input_ids[vision_start_indices + 1]
+                 image_nums = (vision_tokens == image_token_id).sum()
+                 video_nums = (vision_tokens == video_token_id).sum()
+                 input_tokens = input_ids.tolist()
+                 llm_pos_ids_list: list = []
+                 st = 0
+                 remain_images, remain_videos = image_nums, video_nums
+                 for _ in range(image_nums + video_nums):
+                     if image_token_id in input_tokens and remain_images > 0:
+                         ed_image = input_tokens.index(image_token_id, st)
+                     else:
+                         ed_image = len(input_tokens) + 1
+                     if video_token_id in input_tokens and remain_videos > 0:
+                         ed_video = input_tokens.index(video_token_id, st)
+                     else:
+                         ed_video = len(input_tokens) + 1
+                     if ed_image < ed_video:
+                         t, h, w = (
+                             image_grid_thw[image_index][0],
+                             image_grid_thw[image_index][1],
+                             image_grid_thw[image_index][2],
+                         )
+                         image_index += 1
+                         remain_images -= 1
+                         ed = ed_image
+                     else:
+                         t, h, w = (
+                             video_grid_thw[video_index][0],
+                             video_grid_thw[video_index][1],
+                             video_grid_thw[video_index][2],
+                         )
+                         video_index += 1
+                         remain_videos -= 1
+                         ed = ed_video
+                     llm_grid_t, llm_grid_h, llm_grid_w = (
+                         t.item(),
+                         h.item() // spatial_merge_size,
+                         w.item() // spatial_merge_size,
+                     )
+                     text_len = ed - st
+
+                     st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+                     llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
+
+                     # t_index is always 0 because llm_grid_t is always 1
+                     # (we use timestamps to encode the temporal information for videos)
+                     t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
+                     h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
+                     w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
+                     llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx)
+                     st = ed + llm_grid_t * llm_grid_h * llm_grid_w
+
+                 if st < len(input_tokens):
+                     st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+                     text_len = len(input_tokens) - st
+                     llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
+
+                 llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
+                 position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device)
+                 mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i]))
+             mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1)
+             return position_ids, mrope_position_deltas
+         else:
+             if attention_mask is not None:
+                 position_ids = attention_mask.long().cumsum(-1) - 1
+                 position_ids.masked_fill_(attention_mask == 0, 1)
+                 position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
+                 max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
+                 mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1]
+             else:
+                 position_ids = (
+                     torch.arange(input_ids.shape[1], device=input_ids.device)
+                     .view(1, 1, -1)
+                     .expand(3, input_ids.shape[0], -1)
+                 )
+                 mrope_position_deltas = torch.zeros(
+                     [input_ids.shape[0], 1],
+                     device=input_ids.device,
+                     dtype=input_ids.dtype,
+                 )
+
+             return position_ids, mrope_position_deltas
+
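+     # Position-id layout sketch (illustrative): for 3 text tokens, then one image of
+     # grid_thw = (1, 4, 4) (merged 2x2 -> 4 visual tokens), then 2 text tokens, every
+     # axis gives the text ids 0..2, the visual tokens share t = 3 while (h, w) walk
+     # the 2x2 grid (values 3..4), and the trailing text resumes at max(id) + 1 = 5.
+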
+     def get_video_features(
+         self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
+     ):
+         """
+         Encodes videos into continuous embeddings that can be forwarded to the language model. The deepstack visual features are also returned.
+
+         Args:
+             pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
+                 The tensors corresponding to the input videos.
+             video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+                 The temporal, height and width of feature shape of each video in LLM.
+         """
+         # Same implementation as for images
+         return self.get_image_features(pixel_values_videos, video_grid_thw)
+
+     def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
+         """
+         Encodes images into continuous embeddings that can be forwarded to the language model. The deepstack visual features are also returned.
+
+         Args:
+             pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
+                 The tensors corresponding to the input images.
+             image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+                 The temporal, height and width of feature shape of each image in LLM.
+         """
+         pixel_values = pixel_values.type(self.visual.dtype)
+         image_embeds, deepstack_image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
+         split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist()
+         image_embeds = torch.split(image_embeds, split_sizes)
+         return image_embeds, deepstack_image_embeds
+
+     def get_placeholder_mask(
+         self,
+         input_ids: torch.LongTensor,
+         inputs_embeds: torch.FloatTensor,
+         image_features: Optional[torch.FloatTensor] = None,
+         video_features: Optional[torch.FloatTensor] = None,
+     ):
+         """
+         Obtains the multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder
+         token count equals the length of the multimodal features. If the lengths differ, an error is raised.
+         """
+         if input_ids is None:
+             special_image_mask = inputs_embeds == self.get_input_embeddings()(
+                 torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
+             )
+             special_image_mask = special_image_mask.all(-1)
+             special_video_mask = inputs_embeds == self.get_input_embeddings()(
+                 torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
+             )
+             special_video_mask = special_video_mask.all(-1)
+         else:
+             special_image_mask = input_ids == self.config.image_token_id
+             special_video_mask = input_ids == self.config.video_token_id
+
+         n_image_tokens = special_image_mask.sum()
+         special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
+         if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
+             raise ValueError(
+                 f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
+             )
+
+         n_video_tokens = special_video_mask.sum()
+         special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
+         if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
+             raise ValueError(
+                 f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
+             )
+
+         return special_image_mask, special_video_mask
+
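+     # Consistency sketch: one image with grid_thw = (1, 8, 8) and spatial_merge_size=2
+     # yields 1*8*8 // 2**2 = 16 merged embeddings, so the prompt must contain exactly
+     # 16 image placeholder tokens; get_placeholder_mask raises otherwise, and
+     # masked_scatter later writes the 16 embeddings into those positions in order.
+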
1409
+ @auto_docstring
1410
+ @check_model_inputs()
1411
+ def forward(
1412
+ self,
1413
+ input_ids: torch.LongTensor = None,
1414
+ attention_mask: Optional[torch.Tensor] = None,
1415
+ position_ids: Optional[torch.LongTensor] = None,
1416
+ past_key_values: Optional[Cache] = None,
1417
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1418
+ pixel_values: Optional[torch.Tensor] = None,
1419
+ pixel_values_videos: Optional[torch.FloatTensor] = None,
1420
+ image_grid_thw: Optional[torch.LongTensor] = None,
1421
+ video_grid_thw: Optional[torch.LongTensor] = None,
1422
+ cache_position: Optional[torch.LongTensor] = None,
1423
+ # --- 🚨 LIME CACHE ARGUMENT START 🚨 ---
1424
+ # 用于接收推理时(Generation)缓存的视觉上下文
1425
+ cached_lime_visual_context: Optional[torch.Tensor] = None,
1426
+ cached_lime_visual_mask: Optional[torch.Tensor] = None,
1427
+ # --- 🚨 LIME CACHE ARGUMENT END 🚨 ---
1428
+ **kwargs: Unpack[TransformersKwargs],
1429
+ ) -> Union[tuple, Qwen3VLModelOutputWithPast]:
1430
+ r"""
1431
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1432
+ The temporal, height and width of feature shape of each image in LLM.
1433
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1434
+ The temporal, height and width of feature shape of each video in LLM.
1435
+ cached_lime_visual_context (`torch.Tensor` of shape `(batch_size, vis_seqlen, hidden_size)`, *optional*):
1436
+ The cached visual context for Lime cross-attention during generation.
1437
+ cached_lime_visual_mask (`torch.Tensor` of shape `(batch_size, 1, 1, vis_seqlen)`, *optional*):
1438
+ The cached attention mask for Lime cross-attention during generation.
1439
+ """
1440
+ if (input_ids is None) ^ (inputs_embeds is not None):
1441
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1442
+
1443
+ if inputs_embeds is None:
1444
+ inputs_embeds = self.get_input_embeddings()(input_ids)
1445
+
1446
+ image_mask = None
1447
+ video_mask = None
1448
+
1449
+ if pixel_values is not None:
1450
+ image_embeds, deepstack_image_embeds = self.get_image_features(pixel_values, image_grid_thw)
1451
+ image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
1452
+ image_mask, _ = self.get_placeholder_mask(
1453
+ input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
1454
+ )
1455
+ inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
1456
+
1457
+ if pixel_values_videos is not None:
1458
+ video_embeds, deepstack_video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
1459
+ video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
1460
+ _, video_mask = self.get_placeholder_mask(
1461
+ input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
1462
+ )
1463
+ inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
1464
+
1465
+ visual_pos_masks = None
1466
+ deepstack_visual_embeds = None
1467
+ if image_mask is not None and video_mask is not None:
1468
+ # aggregate visual_pos_masks and deepstack_visual_embeds
1469
+ image_mask = image_mask[..., 0]
1470
+ video_mask = video_mask[..., 0]
1471
+ visual_pos_masks = image_mask | video_mask
1472
+ deepstack_visual_embeds = []
1473
+ image_mask_joint = image_mask[visual_pos_masks]
1474
+ video_mask_joint = video_mask[visual_pos_masks]
1475
+ for img_embed, vid_embed in zip(deepstack_image_embeds, deepstack_video_embeds):
1476
+ embed_joint = img_embed.new_zeros(visual_pos_masks.sum(), img_embed.shape[-1]).to(img_embed.device)
1477
+ embed_joint[image_mask_joint, :] = img_embed
1478
+ embed_joint[video_mask_joint, :] = vid_embed
1479
+ deepstack_visual_embeds.append(embed_joint)
1480
+ elif image_mask is not None:
1481
+ image_mask = image_mask[..., 0]
1482
+ visual_pos_masks = image_mask
1483
+ deepstack_visual_embeds = deepstack_image_embeds
1484
+ elif video_mask is not None:
1485
+ video_mask = video_mask[..., 0]
1486
+ visual_pos_masks = video_mask
1487
+ deepstack_visual_embeds = deepstack_video_embeds
1488
+
1489
+ # ======================================================================
1490
+ # 🚨 LIME CONTEXT MANAGEMENT
1491
+ # ======================================================================
1492
+
1493
+ # 1. 获取当前的历史长度 (判定是否为新序列的关键)
1494
+ current_seq_len = 0 if past_key_values is None else past_key_values.get_seq_length()
1495
+ is_fresh_start = (current_seq_len == 0)
1496
+
1497
+ # 2. 判断当前输入是否包含新的视觉信息
1498
+ has_new_vision = (visual_pos_masks is not None)
1499
+
1500
+ lime_visual_context = None
1501
+ lime_visual_mask = None
1502
+
1503
+ if cached_lime_visual_context is not None:
1504
+ # Case A: 强制使用外部传入的 Context
1505
+ lime_visual_context = cached_lime_visual_context
1506
+ lime_visual_mask = cached_lime_visual_mask
1507
+
1508
+ elif has_new_vision:
1509
+ # Case B: 当前输入有图片/视频 -> 提取并更新 Cache
1510
+ # (注意:如果是多轮对话追加图片,这里简化为覆盖。如果需要支持多图累加,需做 concat)
1511
+ bsz = inputs_embeds.shape[0]
1512
+ extracted_visuals = []
1513
+
1514
+ for i in range(bsz):
1515
+ mask = visual_pos_masks[i] > 0
1516
+ feats = inputs_embeds[i][mask]
1517
+ extracted_visuals.append(feats)
1518
+
1519
+ lime_visual_context = pad_sequence(extracted_visuals, batch_first=True)
1520
+
1521
+ # 构建 Mask
1522
+ lengths = [x.shape[0] for x in extracted_visuals]
1523
+ max_len = lime_visual_context.shape[1]
1524
+ lime_visual_mask = torch.full(
1525
+ (bsz, 1, 1, max_len),
1526
+ torch.finfo(inputs_embeds.dtype).min,
1527
+ device=inputs_embeds.device,
1528
+ dtype=inputs_embeds.dtype
1529
+ )
1530
+ for i, l in enumerate(lengths):
1531
+ if l > 0:
1532
+ lime_visual_mask[i, ..., :l] = 0.0
1533
+
1534
+ # 更新内部 Cache
1535
+ self.lime_visual_context_cache = lime_visual_context
1536
+ self.lime_visual_mask_cache = lime_visual_mask
1537
+
1538
+ elif is_fresh_start:
1539
+ # Case C: 是新序列(Start),且没有视觉输入(No Vision) -> 纯文本请求
1540
+ # 🚨 必须清空 Cache,防止上一轮对话的图片残留
1541
+ self.lime_visual_context_cache = None
1542
+ self.lime_visual_mask_cache = None
1543
+ lime_visual_context = None
1544
+ lime_visual_mask = None
1545
+
1546
+ else:
1547
+ # Case D: 是生成过程(Decoding),且没有新图片 -> 使用 Cache
1548
+ lime_visual_context = self.lime_visual_context_cache
1549
+ lime_visual_mask = self.lime_visual_mask_cache
1550
+
1551
+ # ======================================================================
1552
+
1553
+         if position_ids is None:
+             attention_mask_tensor = (
+                 attention_mask if not isinstance(attention_mask, dict) else attention_mask["full_attention"]
+             )
+             if attention_mask_tensor is not None and attention_mask_tensor.ndim == 4:
+                 attention_mask_tensor = torch.diagonal(attention_mask_tensor[:, 0], dim1=1, dim2=2)
+                 # Only apply conversion for floating point tensors (inverted masks)
+                 if attention_mask_tensor.dtype.is_floating_point:
+                     attention_mask_tensor = attention_mask_tensor / torch.finfo(attention_mask_tensor.dtype).min
+                     attention_mask_tensor = (1.0 - attention_mask_tensor).int()
+ 
+             # Calculate RoPE index once per generation in the pre-fill stage only.
+             # When compiling, we can't check tensor values, so we check only input length.
+             # It is safe to assume that `length != 1` means we're in pre-fill because compiled
+             # models currently cannot do assisted decoding
+             prefill_compiled_stage = is_torchdynamo_compiling() and (
+                 (input_ids is not None and input_ids.shape[1] != 1)
+                 or (inputs_embeds is not None and inputs_embeds.shape[1] != 1)
+             )
+             prefill_noncompiled_stage = not is_torchdynamo_compiling() and (
+                 (cache_position is not None and cache_position[0] == 0)
+                 or (past_key_values is None or past_key_values.get_seq_length() == 0)
+             )
+             if (prefill_compiled_stage or prefill_noncompiled_stage) or self.rope_deltas is None:
+                 position_ids, rope_deltas = self.get_rope_index(
+                     input_ids,
+                     image_grid_thw,
+                     video_grid_thw,
+                     attention_mask=attention_mask_tensor,
+                 )
+                 self.rope_deltas = rope_deltas
+             # then use the prev pre-calculated rope-deltas to get the correct position ids
+             else:
+                 batch_size, seq_length, _ = inputs_embeds.shape
+                 delta = (
+                     (cache_position[0] + self.rope_deltas).to(inputs_embeds.device)
+                     if cache_position is not None
+                     else 0
+                 )
+                 position_ids = torch.arange(seq_length, device=inputs_embeds.device)
+                 position_ids = position_ids.view(1, -1).expand(batch_size, -1)
+                 if cache_position is not None:  # otherwise `deltas` is an int `0`
+                     delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0)
+                 position_ids = position_ids.add(delta)
+                 position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
+ 
+         outputs = self.language_model(
+             input_ids=None,
+             position_ids=position_ids,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             cache_position=cache_position,
+             visual_pos_masks=visual_pos_masks,
+             deepstack_visual_embeds=deepstack_visual_embeds,
+             # --- 🚨 PASS LIME CONTEXT 🚨 ---
+             lime_visual_context=lime_visual_context,
+             lime_visual_mask=lime_visual_mask,
+             # -------------------------------
+             **kwargs,
+         )
+ 
+         return Qwen3VLModelOutputWithPast(
+             last_hidden_state=outputs.last_hidden_state,
+             past_key_values=outputs.past_key_values,
+             rope_deltas=self.rope_deltas,
+         )
+ 
+ 
+ @dataclass
+ @auto_docstring(
+     custom_intro="""
+     Base class for Qwen3VL causal language model (or autoregressive) outputs.
+     """
+ )
+ class Qwen3VLCausalLMOutputWithPast(ModelOutput):
+     r"""
+     loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+         Language modeling loss (for next-token prediction).
+     logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+         Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+     past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+         It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
+ 
+         Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+         `past_key_values` input) to speed up sequential decoding.
+     rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+         The rope index difference between sequence length and multimodal rope.
+     """
+ 
+     loss: Optional[torch.FloatTensor] = None
+     logits: Optional[torch.FloatTensor] = None
+     past_key_values: Optional[Cache] = None
+     hidden_states: Optional[tuple[torch.FloatTensor]] = None
+     attentions: Optional[tuple[torch.FloatTensor]] = None
+     rope_deltas: Optional[torch.LongTensor] = None
+ 
+ 
+ class LimeQwen3VLForConditionalGeneration(Qwen3VLPreTrainedModel, GenerationMixin):
+     _checkpoint_conversion_mapping = {}
+     _tied_weights_keys = ["lm_head.weight"]
+     # Reference: fix gemma3 grad acc #37208
+     accepts_loss_kwargs = False
+     config: LimeQwen3VLConfig
+ 
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = LimeQwen3VLModel(config)
+         self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
+ 
+         self.post_init()
+ 
+     def get_input_embeddings(self):
+         return self.model.get_input_embeddings()
+ 
+     def set_input_embeddings(self, value):
+         self.model.set_input_embeddings(value)
+ 
+     def set_decoder(self, decoder):
+         self.model.set_decoder(decoder)
+ 
+     def get_decoder(self):
+         return self.model.get_decoder()
+ 
+     def get_video_features(
+         self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
+     ):
+         return self.model.get_video_features(pixel_values_videos, video_grid_thw)
+ 
+     def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
+         return self.model.get_image_features(pixel_values, image_grid_thw)
+ 
+     # Make modules available through conditional class for BC
+     @property
+     def language_model(self):
+         return self.model.language_model
+ 
+     @property
+     def visual(self):
+         return self.model.visual
+ 
+     @check_model_inputs()
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         pixel_values: Optional[torch.Tensor] = None,
+         pixel_values_videos: Optional[torch.FloatTensor] = None,
+         image_grid_thw: Optional[torch.LongTensor] = None,
+         video_grid_thw: Optional[torch.LongTensor] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         logits_to_keep: Union[int, torch.Tensor] = 0,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> Union[tuple, Qwen3VLCausalLMOutputWithPast]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+             config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+             (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+         image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+             The temporal, height and width of feature shape of each image in LLM.
+         video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+             The temporal, height and width of feature shape of each video in LLM.
+ 
+         Example:
+             TODO: Add example
+         """
+         outputs = self.model(
+             input_ids=input_ids,
+             pixel_values=pixel_values,
+             pixel_values_videos=pixel_values_videos,
+             image_grid_thw=image_grid_thw,
+             video_grid_thw=video_grid_thw,
+             position_ids=position_ids,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             cache_position=cache_position,
+             **kwargs,
+         )
+ 
+         hidden_states = outputs[0]
+ 
+         # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+         logits = self.lm_head(hidden_states[:, slice_indices, :])
+ 
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
+ 
+         return Qwen3VLCausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             rope_deltas=outputs.rope_deltas,
+         )
+ 
+     def prepare_inputs_for_generation(
+         self,
+         input_ids,
+         past_key_values=None,
+         attention_mask=None,
+         inputs_embeds=None,
+         cache_position=None,
+         position_ids=None,
+         use_cache=True,
+         pixel_values=None,
+         pixel_values_videos=None,
+         image_grid_thw=None,
+         video_grid_thw=None,
+         **kwargs,
+     ):
+         # Overwritten -- in specific circumstances we don't want to forward image inputs to the model
+ 
+         model_inputs = super().prepare_inputs_for_generation(
+             input_ids,
+             past_key_values=past_key_values,
+             attention_mask=attention_mask,
+             inputs_embeds=inputs_embeds,
+             cache_position=cache_position,
+             position_ids=position_ids,
+             pixel_values=pixel_values,
+             pixel_values_videos=pixel_values_videos,
+             image_grid_thw=image_grid_thw,
+             video_grid_thw=video_grid_thw,
+             use_cache=use_cache,
+             **kwargs,
+         )
+ 
+         # Qwen3VL position_ids are prepared with rope_deltas in forward
+         model_inputs["position_ids"] = None
+ 
+         if cache_position[0] != 0:
+             model_inputs["pixel_values"] = None
+             model_inputs["pixel_values_videos"] = None
+ 
+         return model_inputs
+ 
+     def _get_image_nums_and_video_nums(
+         self,
+         input_ids: Optional[torch.LongTensor],
+         inputs_embeds: Optional[torch.Tensor] = None,
+     ) -> tuple[torch.Tensor, torch.Tensor]:
+         """
+         Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
+         These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.
+ 
+         Args:
+             input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+                 Indices of input sequence tokens in the vocabulary.
+ 
+         Returns:
+             image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)
+             video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`)
+         """
+         image_token_id = self.config.image_token_id
+         video_token_id = self.config.video_token_id
+         vision_start_token_id = self.config.vision_start_token_id
+ 
+         if inputs_embeds is not None:
+             vision_start_mask = (
+                 inputs_embeds
+                 == self.get_input_embeddings()(
+                     torch.tensor(vision_start_token_id, dtype=torch.long, device=inputs_embeds.device)
+                 )
+             )[..., 0]
+             image_mask = (
+                 inputs_embeds
+                 == self.get_input_embeddings()(
+                     torch.tensor(image_token_id, dtype=torch.long, device=inputs_embeds.device)
+                 )
+             )[..., 0]
+             video_mask = (
+                 inputs_embeds
+                 == self.get_input_embeddings()(
+                     torch.tensor(video_token_id, dtype=torch.long, device=inputs_embeds.device)
+                 )
+             )[..., 0]
+         else:
+             vision_start_mask = input_ids == vision_start_token_id
+             image_mask = input_ids == image_token_id
+             video_mask = input_ids == video_token_id
+ 
+         vision_first_mask = torch.roll(vision_start_mask, shifts=1, dims=1)
+         image_nums = torch.sum(vision_first_mask & image_mask, dim=1)
+         video_nums = torch.sum(vision_first_mask & video_mask, dim=1)
+ 
+         return image_nums, video_nums
+ 
+     def _expand_inputs_for_generation(
+         self,
+         expand_size: int = 1,
+         is_encoder_decoder: bool = False,
+         input_ids: Optional[torch.LongTensor] = None,
+         **model_kwargs,
+     ) -> tuple[torch.LongTensor, dict[str, Any]]:
+         # Overwritten -- Support for expanding tensors without a batch size dimension
+         # e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw, second_per_grid_t
+         # pixel_values.shape[0] is sum(seqlen_images for samples)
+         # image_grid_thw.shape[0] is sum(num_images for samples)
+ 
+         if expand_size == 1:
+             return input_ids, model_kwargs
+ 
+         visual_keys = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw", "second_per_grid_ts"]
+ 
+         def _expand_dict_for_generation_visual(dict_to_expand):
+             image_grid_thw = model_kwargs.get("image_grid_thw", None)
+             video_grid_thw = model_kwargs.get("video_grid_thw", None)
+             image_nums, video_nums = self._get_image_nums_and_video_nums(
+                 input_ids, inputs_embeds=model_kwargs.get("inputs_embeds", None)
+             )
+ 
+             def _repeat_interleave_samples(x, lengths, repeat_times):
+                 samples = torch.split(x, lengths)
+                 repeat_args = [repeat_times] + [1] * (x.dim() - 1)
+                 result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0)
+                 return result
+ 
+             for key in dict_to_expand:
+                 if key == "pixel_values":
+                     # split images into samples
+                     samples = torch.split(image_grid_thw, list(image_nums))
+                     # compute the sequence length of images for each sample
+                     lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
+                     dict_to_expand[key] = _repeat_interleave_samples(
+                         dict_to_expand[key], lengths=lengths, repeat_times=expand_size
+                     )
+                 elif key == "image_grid_thw":
+                     # get the num of images for each sample
+                     lengths = list(image_nums)
+                     dict_to_expand[key] = _repeat_interleave_samples(
+                         dict_to_expand[key], lengths=lengths, repeat_times=expand_size
+                     )
+                 elif key == "pixel_values_videos":
+                     samples = torch.split(video_grid_thw, list(video_nums))
+                     lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
+                     dict_to_expand[key] = _repeat_interleave_samples(
+                         dict_to_expand[key], lengths=lengths, repeat_times=expand_size
+                     )
+                 elif key == "video_grid_thw":
+                     lengths = list(video_nums)
+                     dict_to_expand[key] = _repeat_interleave_samples(
+                         dict_to_expand[key], lengths=lengths, repeat_times=expand_size
+                     )
+                 elif key == "second_per_grid_ts":
+                     dict_to_expand[key] = _repeat_interleave_samples(
+                         dict_to_expand[key], lengths=list(video_nums), repeat_times=expand_size
+                     )
+             return dict_to_expand
+ 
+         def _expand_dict_for_generation(dict_to_expand):
+             for key in dict_to_expand:
+                 if (
+                     key != "cache_position"
+                     and dict_to_expand[key] is not None
+                     and isinstance(dict_to_expand[key], torch.Tensor)
+                     and key not in visual_keys
+                 ):
+                     dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
+             return dict_to_expand
+ 
+         model_kwargs = _expand_dict_for_generation_visual(model_kwargs)
+ 
+         if input_ids is not None:
+             input_ids = input_ids.repeat_interleave(expand_size, dim=0)
+ 
+         model_kwargs = _expand_dict_for_generation(model_kwargs)
+ 
+         if is_encoder_decoder:
+             if model_kwargs.get("encoder_outputs") is None:
+                 raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
+             model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
+ 
+         return input_ids, model_kwargs
+ 
+ 
+ __all__ = [
+     "Qwen3VLVisionModel",
+     "LimeQwen3VLForConditionalGeneration",
+     "LimeQwen3VLModel",
+     "Qwen3VLPreTrainedModel",
+     "Qwen3VLTextModel",
+ ]
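
For a quick smoke test of the exported `LimeQwen3VLForConditionalGeneration`, a minimal usage sketch. It assumes the repo's `config.json` registers the `Lime*` classes under `auto_map` so that `trust_remote_code=True` can resolve them, and that the processor follows the standard Qwen3-VL chat-template flow; the image URL is a placeholder.

```python
import torch
from transformers import AutoModelForImageTextToText, AutoProcessor

repo = "chamber111/Qwen3-VL-4B-Instruct-Lime-512-GRPO-Eval-3"  # this upload's repo id
processor = AutoProcessor.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForImageTextToText.from_pretrained(
    repo, torch_dtype=torch.bfloat16, device_map="auto",
    trust_remote_code=True,  # assumes the custom classes are exposed via auto_map
)

messages = [{
    "role": "user",
    "content": [
        {"type": "image", "url": "https://example.com/sample.jpg"},  # placeholder image
        {"type": "text", "text": "Describe this image."},
    ],
}]
inputs = processor.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(model.device)

with torch.no_grad():
    generated = model.generate(**inputs, max_new_tokens=64)
# strip the prompt tokens before decoding
print(processor.batch_decode(generated[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True)[0])
```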
preprocessor_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "crop_size": null,
+   "data_format": "channels_first",
+   "default_to_square": true,
+   "device": null,
+   "disable_grouping": null,
+   "do_center_crop": null,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_pad": null,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "Qwen2VLImageProcessorFast",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "input_data_format": null,
+   "max_pixels": null,
+   "merge_size": 2,
+   "min_pixels": null,
+   "pad_size": null,
+   "patch_size": 16,
+   "processor_class": "Qwen3VLProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "return_tensors": null,
+   "size": {
+     "longest_edge": 16777216,
+     "shortest_edge": 65536
+   },
+   "temporal_patch_size": 2
+ }
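
The `patch_size`/`merge_size` pair above sets the visual token budget: images are resized so height and width are multiples of `patch_size * merge_size = 32`, with total area clamped to the `size` bounds (65536 to 16777216 pixels), and each 32×32 block becomes one token after the 2×2 patch merge. A back-of-envelope sketch with a hypothetical helper `visual_token_count` (an approximation of the resize rule, not the library's exact code):

```python
import math

def visual_token_count(h: int, w: int, patch: int = 16, merge: int = 2,
                       min_pixels: int = 65536, max_pixels: int = 16777216) -> int:
    factor = patch * merge
    # scale the image into the pixel-area budget, preserving aspect ratio
    scale = math.sqrt(min(max(h * w, min_pixels), max_pixels) / (h * w))
    h2 = max(factor, round(h * scale / factor) * factor)
    w2 = max(factor, round(w * scale / factor) * factor)
    # one token per 2x2 group of 16x16 patches
    return (h2 // patch) * (w2 // patch) // (merge * merge)

print(visual_token_count(1080, 1920))  # a Full-HD frame -> 2040 visual tokens
```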
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a42db33c53048f926fb7c8a6cb1c31cc9485a4996ed99e64446243a57765449f
+ size 11422943
tokenizer_config.json ADDED
@@ -0,0 +1,246 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<|object_ref_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|object_ref_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151648": {
+       "content": "<|box_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151649": {
+       "content": "<|box_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151665": {
+       "content": "<tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151666": {
+       "content": "</tool_response>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151667": {
+       "content": "<think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151668": {
+       "content": "</think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>",
+     "<|object_ref_start|>",
+     "<|object_ref_end|>",
+     "<|box_start|>",
+     "<|box_end|>",
+     "<|quad_start|>",
+     "<|quad_end|>",
+     "<|vision_start|>",
+     "<|vision_end|>",
+     "<|vision_pad|>",
+     "<|image_pad|>",
+     "<|video_pad|>"
+   ],
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "fix_mistral_regex": true,
+   "max_length": null,
+   "model_max_length": 262144,
+   "pad_to_multiple_of": null,
+   "pad_token": "<|endoftext|>",
+   "pad_token_type_id": 0,
+   "padding_side": "left",
+   "processor_class": "Qwen3VLProcessor",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "truncation_side": "left",
+   "unk_token": null
+ }
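
A few of these settings matter at inference time: `padding_side: left` keeps prompt tokens adjacent to generated tokens in batched decoding, and the eos/pad pair differs (`<|im_end|>` vs `<|endoftext|>`). A quick sanity-check sketch (the local path is a placeholder for wherever the files above are downloaded):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./Qwen3-VL-4B-Instruct-Lime-512-GRPO-Eval-3")  # placeholder path
assert tok.padding_side == "left"  # required so batched decoder-only generation stays aligned
assert tok.eos_token == "<|im_end|>" and tok.pad_token == "<|endoftext|>"
print(tok.convert_tokens_to_ids("<|image_pad|>"))  # 151655, the image placeholder token above
```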
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e31842a178c20a83e27f310ec9965c49a20b52a2f4603c9d68187c378f51e15
+ size 9105
video_preprocessor_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "crop_size": null,
+   "data_format": "channels_first",
+   "default_to_square": true,
+   "device": null,
+   "do_center_crop": null,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "do_sample_frames": true,
+   "fps": 2,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "input_data_format": null,
+   "max_frames": 768,
+   "merge_size": 2,
+   "min_frames": 4,
+   "num_frames": null,
+   "pad_size": null,
+   "patch_size": 16,
+   "processor_class": "Qwen3VLProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "return_metadata": false,
+   "size": {
+     "longest_edge": 25165824,
+     "shortest_edge": 4096
+   },
+   "temporal_patch_size": 2,
+   "video_metadata": null,
+   "video_processor_type": "Qwen3VLVideoProcessor"
+ }
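
The video settings imply a simple sampling rule: frames are drawn at `fps: 2`, clamped to `[min_frames, max_frames] = [4, 768]`, and consumed in temporal pairs because `temporal_patch_size` is 2. A rough bookkeeping sketch with a hypothetical helper `sampled_frames` (an approximation, not the processor's exact code):

```python
def sampled_frames(duration_s: float, fps: float = 2.0,
                   min_frames: int = 4, max_frames: int = 768) -> int:
    # sample at the configured fps, then clamp to the frame budget
    n = round(duration_s * fps)
    n = max(min_frames, min(n, max_frames))
    # pad to a multiple of the temporal patch size (2)
    return n + (n % 2)

print(sampled_frames(90))  # a 90 s clip -> 180 frames -> 90 temporal patches
```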
vocab.json ADDED
The diff for this file is too large to render. See raw diff