assafvayner (HF Staff) committed
Commit 627e8c9 · verified · 1 Parent(s): 33f8f21

Upload folder
.gitattributes CHANGED
@@ -64,3 +64,4 @@ blobs/39ab057316af49c3d81c67b80a98d72727ce686ac68ae72ce71a05fc5297b856 filter=lf
  blobs/e1219ef85875905368b39e3fe383d72fc6539ade5abf81f7cedf94a19275a345 filter=lfs diff=lfs merge=lfs -text
  blobs/9ce20192fbe0d521d100521f1e0836c415debacb615b89f7658178420822e710 filter=lfs diff=lfs merge=lfs -text
  blobs/1fa4abb1ce00765aa78c4714c71c65c57e46706564aa8f908e78d7c6fa51d07e filter=lfs diff=lfs merge=lfs -text
+ blobs/f78e77184ab4f3d53e28b85ffda8b328185cc40fcfaab59d17c3df0f2524621e.incomplete filter=lfs diff=lfs merge=lfs -text
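
The added rule tracks the still-in-flight `.incomplete` blob with Git LFS, matching the existing entries. As a rough illustration of how such a line is interpreted, here is a minimal stdlib-only Python sketch (a simplification: real Git uses gitignore-style pattern matching, not `fnmatch`):

```python
from fnmatch import fnmatch

LFS_ATTRS = "filter=lfs diff=lfs merge=lfs -text"

def is_lfs_tracked(path: str, gitattributes_text: str) -> bool:
    """True if `path` matches a .gitattributes rule carrying the LFS attributes."""
    for line in gitattributes_text.splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        pattern, _, attrs = line.partition(" ")
        if attrs.strip() == LFS_ATTRS and fnmatch(path, pattern):
            return True
    return False

rule = "blobs/f78e77184ab4f3d53e28b85ffda8b328185cc40fcfaab59d17c3df0f2524621e.incomplete filter=lfs diff=lfs merge=lfs -text"
print(is_lfs_tracked("blobs/f78e77184ab4f3d53e28b85ffda8b328185cc40fcfaab59d17c3df0f2524621e.incomplete", rule))  # True
```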
blobs/3009e23970737db921277af66eb208b6b1e8702f ADDED
@@ -0,0 +1,146 @@
+ {%- if not add_generation_prompt is defined %}
+ {%- set add_generation_prompt = false %}
+ {%- endif %}
+ {%- set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='', is_first_sp=true, is_first_user=true, is_last_user=false) %}
+ {%- for message in messages %}
+ {%- if message['role'] == 'system' %}
+ {%- if ns.is_first_sp %}
+ {%- set ns.system_prompt = ns.system_prompt + message['content'] %}
+ {%- set ns.is_first_sp = false %}
+ {%- else %}
+ {%- set ns.system_prompt = ns.system_prompt + '\n\n' + message['content'] %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {{- bos_token }}
+ {{- ns.system_prompt }}
+ {%- if tools %}
+ {%- if ns.system_prompt != '' %}
+ {{- '\n\n# Tools\n\nYou may call one or more functions to assist with the user query.' }}
+ {%- else %}
+ {{- '# Tools\n\nYou may call one or more functions to assist with the user query.' }}
+ {%- endif %}
+ {{- '\n\nYou are provided with function signatures within <tools></tools> XML tags:' }}
+ {{- '\n<tools>\n' }}
+ {%- for tool in tools %}
+ {%- if loop.index0 > 0 %}
+ {{- '\n' }}
+ {%- endif %}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- '\n</tools>\n\n' }}
+ {{- 'For function call returns, you should first print <tool_calls>' }}
+ {{- 'For each function call, you should return an object like:\n' }}
+ {{- '<tool_call>function_name\n```json\nfunction_arguments_in_json_format\n```</tool_call>' }}
+ {{- 'At the end of function call returns, you should print </tool_calls>' }}
+ {%- endif %}
+ {%- if ns.system_prompt != '' or tools %}
+ {{- '<|hy_place▁holder▁no▁3|>' }}
+ {%- endif %}
+ {%- set image_count = namespace(value=0) %}
+ {%- set video_count = namespace(value=0) %}
+ {%- set last_user_idx = namespace(value=-1) %}
+ {%- for message in messages %}
+ {%- if message['role'] == 'user' %}
+ {%- set last_user_idx.value = loop.index0 %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if message['role'] == 'user' %}
+ {%- set ns.is_tool = false %}
+ {%- set ns.is_first = false %}
+ {%- set ns.is_last_user = true %}
+ {{- '<|hy_User|>' }}
+ {%- if message.content is string %}
+ {{- message.content }}
+ {%- else %}
+ {%- for content in message.content %}
+ {%- if content.type == 'image' or 'image' in content or 'image_url' in content %}
+ {%- set image_count.value = image_count.value + 1 %}
+ {%- if add_vision_id %}Picture {{ image_count.value }}: {% endif -%}
+ <|hy_place▁holder▁no▁666|><|hy_place▁holder▁no▁669|><|hy_place▁holder▁no▁672|><|hy_place▁holder▁no▁667|>
+ {%- elif content.type == 'video' or 'video' in content %}
+ {%- set video_count.value = video_count.value + 1 %}
+ {%- if add_vision_id %}Video {{ video_count.value }}: {% endif -%}
+ <|hy_place▁holder▁no▁666|><|hy_place▁holder▁no▁670|><|hy_place▁holder▁no▁672|><|hy_place▁holder▁no▁667|>
+ {%- elif 'text' in content %}
+ {{- content.text }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {%- if loop.index0 == last_user_idx.value %}
+ {%- if enable_thinking is defined and enable_thinking %}
+ {{- '/think' }}
+ {%- else %}
+ {{- '/no_think' }}
+ {%- endif %}
+ {%- else %}
+ {{- '/no_think' }}
+ {%- endif %}
+ {%- endif %}
+ {%- if message['role'] == 'assistant' and message['tool_calls'] is defined and message['tool_calls'] is not none %}
+ {%- set ns.is_last_user = false %}
+ {%- if ns.is_tool %}
+ {{- '</tool_responses>' }}
+ {%- endif %}
+ {{- '<|hy_Assistant|>' }}
+ {%- set ns.is_first = false %}
+ {%- set ns.is_tool = false %}
+ {%- set ns.is_output_first = true %}
+ {%- for tool in message['tool_calls'] %}
+ {%- set arguments = tool['function']['arguments'] %}
+ {%- if arguments is not string %}
+ {%- set arguments = arguments | tojson %}
+ {%- endif %}
+ {%- if not ns.is_first %}
+ {%- if message['content'] is none %}
+ {{- '<tool_calls><tool_call>' + tool['function']['name'] + '\n```json\n' + arguments + '\n```</tool_call>' }}
+ {%- else %}
+ {{- message['content'] + '<tool_calls><tool_call>' + tool['function']['name'] + '\n```json\n' + arguments + '\n```</tool_call>' }}
+ {%- endif %}
+ {%- set ns.is_first = true %}
+ {%- else %}
+ {{- '\n<tool_call>' + tool['function']['name'] + '\n```json\n' + arguments + '\n```</tool_call>' }}
+ {%- endif %}
+ {%- endfor %}
+ {{- '</tool_calls>' + eos_token }}
+ {%- endif %}
+ {%- if message['role'] == 'assistant' and (message['tool_calls'] is not defined or message['tool_calls'] is none) %}
+ {%- set ns.is_last_user = false %}
+ {{- '<|hy_Assistant|>' }}
+ {{- '<think>\n\n</think>\n' }}
+ {{- '<answer>\n' }}
+ {%- if message['content'] is string %}
+ {{- message['content'] }}
+ {%- else %}
+ {%- for content_item in message['content'] %}
+ {%- if 'text' in content_item %}
+ {{- content_item['text'] }}
+ {%- endif %}
+ {%- endfor %}
+ {%- endif %}
+ {{- '\n</answer>' }}
+ {{- eos_token }}
+ {%- endif %}
+ {%- if message['role'] == 'tool' %}
+ {%- set ns.is_last_user = false %}
+ {%- set ns.is_tool = true %}
+ {%- if ns.is_output_first %}
+ {{- '<|hy_User|>' + '<tool_responses><tool_response>' + message['content'] + '</tool_response>' }}
+ {%- set ns.is_output_first = false %}
+ {%- else %}
+ {{- '\n<tool_response>' + message['content'] + '</tool_response>' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if ns.is_tool %}
+ {{- '</tool_responses>' }}
+ {%- endif %}
+ {%- if add_generation_prompt %}
+ {{- '<|hy_Assistant|>' }}
+ {%- if enable_thinking is defined and enable_thinking %}
+ {{- '<think>' }}
+ {%- else %}
+ {{- '<think>\n\n</think>\n' }}
+ {%- endif %}
+ {%- endif %}
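
This blob is evidently the model's Jinja chat template: it folds all system messages into one prompt, injects tool signatures inside `<tools>` tags, appends `/think` or `/no_think` to the last user turn depending on `enable_thinking`, and wraps assistant replies in `<think>`/`<answer>` blocks. A hedged sketch of rendering it with plain Jinja2 follows; the local file name and the `bos_token`/`eos_token` strings are assumptions, and in practice transformers applies this template via `tokenizer.apply_chat_template`:

```python
from jinja2 import Environment, BaseLoader

# Hypothetical local copy of the blob above, saved as a template file.
template_source = open("chat_template.jinja", encoding="utf-8").read()
template = Environment(loader=BaseLoader()).from_string(template_source)

prompt = template.render(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is 2 + 2?"},
    ],
    tools=None,
    add_generation_prompt=True,
    enable_thinking=True,  # last user turn gets '/think'; generation prompt opens '<think>'
    bos_token="<BOS>",     # assumed placeholder token strings, not the model's real tokens
    eos_token="<EOS>",
)
print(prompt)
```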
blobs/3709311b8b7d4f014acc4d46d1d59ac7189a4890 ADDED
@@ -0,0 +1,985 @@
+ {
+   "metadata": {
+     "total_parameters": 3785086960,
+     "total_size": 7570173920
+   },
+   "weight_map": {
+     "model.language_model.model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.0.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.1.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.10.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.11.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.12.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.13.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.14.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.15.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.16.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.17.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.18.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.19.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.2.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.20.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.21.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.22.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.23.input_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.23.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.23.mlp_v.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.23.mlp_v.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.23.mlp_v.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.23.post_attention_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.23.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.23.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.23.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.23.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.23.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.23.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.input_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.mlp_v.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.mlp_v.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.mlp_v.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.post_attention_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.self_attn.k_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.self_attn.key_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.self_attn.o_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.self_attn.q_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.self_attn.query_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.24.self_attn.v_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.input_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.mlp_v.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.mlp_v.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.mlp_v.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.post_attention_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.self_attn.k_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.self_attn.key_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.self_attn.o_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.self_attn.q_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.self_attn.query_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.25.self_attn.v_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.input_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.mlp_v.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.mlp_v.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.mlp_v.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.post_attention_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.self_attn.k_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.self_attn.key_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.self_attn.o_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.self_attn.q_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.self_attn.query_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.26.self_attn.v_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.input_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.mlp_v.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.mlp_v.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.mlp_v.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.post_attention_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.self_attn.k_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.self_attn.key_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.self_attn.o_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.self_attn.q_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.self_attn.query_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.27.self_attn.v_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.input_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.mlp_v.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.mlp_v.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.mlp_v.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.post_attention_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.self_attn.k_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.self_attn.key_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.self_attn.o_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.self_attn.q_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.self_attn.query_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.28.self_attn.v_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.input_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.mlp_v.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.mlp_v.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.mlp_v.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.post_attention_layernorm_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.self_attn.k_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.self_attn.key_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.self_attn.o_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.self_attn.q_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.self_attn.query_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.29.self_attn.v_proj_v.weight": "model-00002-of-00002.safetensors",
+     "model.language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
+     "model.language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors"
484
+ "model.language_model.model.layers.3.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
485
+ "model.language_model.model.layers.3.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
486
+ "model.language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
487
+ "model.language_model.model.layers.3.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
488
+ "model.language_model.model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
489
+ "model.language_model.model.layers.30.input_layernorm_v.weight": "model-00002-of-00002.safetensors",
490
+ "model.language_model.model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
491
+ "model.language_model.model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
492
+ "model.language_model.model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
493
+ "model.language_model.model.layers.30.mlp_v.down_proj.weight": "model-00002-of-00002.safetensors",
494
+ "model.language_model.model.layers.30.mlp_v.gate_proj.weight": "model-00002-of-00002.safetensors",
495
+ "model.language_model.model.layers.30.mlp_v.up_proj.weight": "model-00002-of-00002.safetensors",
496
+ "model.language_model.model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
497
+ "model.language_model.model.layers.30.post_attention_layernorm_v.weight": "model-00002-of-00002.safetensors",
498
+ "model.language_model.model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
499
+ "model.language_model.model.layers.30.self_attn.k_proj_v.weight": "model-00002-of-00002.safetensors",
500
+ "model.language_model.model.layers.30.self_attn.key_layernorm.weight": "model-00002-of-00002.safetensors",
501
+ "model.language_model.model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
502
+ "model.language_model.model.layers.30.self_attn.o_proj_v.weight": "model-00002-of-00002.safetensors",
503
+ "model.language_model.model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
504
+ "model.language_model.model.layers.30.self_attn.q_proj_v.weight": "model-00002-of-00002.safetensors",
505
+ "model.language_model.model.layers.30.self_attn.query_layernorm.weight": "model-00002-of-00002.safetensors",
506
+ "model.language_model.model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
507
+ "model.language_model.model.layers.30.self_attn.v_proj_v.weight": "model-00002-of-00002.safetensors",
508
+ "model.language_model.model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
509
+ "model.language_model.model.layers.31.input_layernorm_v.weight": "model-00002-of-00002.safetensors",
510
+ "model.language_model.model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
511
+ "model.language_model.model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
512
+ "model.language_model.model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
513
+ "model.language_model.model.layers.31.mlp_v.down_proj.weight": "model-00002-of-00002.safetensors",
514
+ "model.language_model.model.layers.31.mlp_v.gate_proj.weight": "model-00002-of-00002.safetensors",
515
+ "model.language_model.model.layers.31.mlp_v.up_proj.weight": "model-00002-of-00002.safetensors",
516
+ "model.language_model.model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
517
+ "model.language_model.model.layers.31.post_attention_layernorm_v.weight": "model-00002-of-00002.safetensors",
518
+ "model.language_model.model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
519
+ "model.language_model.model.layers.31.self_attn.k_proj_v.weight": "model-00002-of-00002.safetensors",
520
+ "model.language_model.model.layers.31.self_attn.key_layernorm.weight": "model-00002-of-00002.safetensors",
521
+ "model.language_model.model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
522
+ "model.language_model.model.layers.31.self_attn.o_proj_v.weight": "model-00002-of-00002.safetensors",
523
+ "model.language_model.model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
524
+ "model.language_model.model.layers.31.self_attn.q_proj_v.weight": "model-00002-of-00002.safetensors",
525
+ "model.language_model.model.layers.31.self_attn.query_layernorm.weight": "model-00002-of-00002.safetensors",
526
+ "model.language_model.model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
527
+ "model.language_model.model.layers.31.self_attn.v_proj_v.weight": "model-00002-of-00002.safetensors",
528
+ "model.language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
529
+ "model.language_model.model.layers.4.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
530
+ "model.language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
531
+ "model.language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
532
+ "model.language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
533
+ "model.language_model.model.layers.4.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
534
+ "model.language_model.model.layers.4.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
535
+ "model.language_model.model.layers.4.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
536
+ "model.language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
537
+ "model.language_model.model.layers.4.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
538
+ "model.language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
539
+ "model.language_model.model.layers.4.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
540
+ "model.language_model.model.layers.4.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
541
+ "model.language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
542
+ "model.language_model.model.layers.4.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
543
+ "model.language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
544
+ "model.language_model.model.layers.4.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
545
+ "model.language_model.model.layers.4.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
546
+ "model.language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
547
+ "model.language_model.model.layers.4.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
548
+ "model.language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
549
+ "model.language_model.model.layers.5.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
550
+ "model.language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
551
+ "model.language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
552
+ "model.language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
553
+ "model.language_model.model.layers.5.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
554
+ "model.language_model.model.layers.5.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
555
+ "model.language_model.model.layers.5.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
556
+ "model.language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
557
+ "model.language_model.model.layers.5.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
558
+ "model.language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
559
+ "model.language_model.model.layers.5.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
560
+ "model.language_model.model.layers.5.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
561
+ "model.language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
562
+ "model.language_model.model.layers.5.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
563
+ "model.language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
564
+ "model.language_model.model.layers.5.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
565
+ "model.language_model.model.layers.5.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
566
+ "model.language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
567
+ "model.language_model.model.layers.5.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
568
+ "model.language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
569
+ "model.language_model.model.layers.6.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
570
+ "model.language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
571
+ "model.language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
572
+ "model.language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
573
+ "model.language_model.model.layers.6.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
574
+ "model.language_model.model.layers.6.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
575
+ "model.language_model.model.layers.6.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
576
+ "model.language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
577
+ "model.language_model.model.layers.6.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
578
+ "model.language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
579
+ "model.language_model.model.layers.6.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
580
+ "model.language_model.model.layers.6.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
581
+ "model.language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
582
+ "model.language_model.model.layers.6.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
583
+ "model.language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
584
+ "model.language_model.model.layers.6.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
585
+ "model.language_model.model.layers.6.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
586
+ "model.language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
587
+ "model.language_model.model.layers.6.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
588
+ "model.language_model.model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
589
+ "model.language_model.model.layers.7.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
590
+ "model.language_model.model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
591
+ "model.language_model.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
592
+ "model.language_model.model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
593
+ "model.language_model.model.layers.7.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
594
+ "model.language_model.model.layers.7.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
595
+ "model.language_model.model.layers.7.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
596
+ "model.language_model.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
597
+ "model.language_model.model.layers.7.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
598
+ "model.language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
599
+ "model.language_model.model.layers.7.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
600
+ "model.language_model.model.layers.7.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
601
+ "model.language_model.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
602
+ "model.language_model.model.layers.7.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
603
+ "model.language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
604
+ "model.language_model.model.layers.7.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
605
+ "model.language_model.model.layers.7.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
606
+ "model.language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
607
+ "model.language_model.model.layers.7.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
608
+ "model.language_model.model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
609
+ "model.language_model.model.layers.8.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
610
+ "model.language_model.model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
611
+ "model.language_model.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
612
+ "model.language_model.model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
613
+ "model.language_model.model.layers.8.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
614
+ "model.language_model.model.layers.8.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
615
+ "model.language_model.model.layers.8.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
616
+ "model.language_model.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
617
+ "model.language_model.model.layers.8.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
618
+ "model.language_model.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
619
+ "model.language_model.model.layers.8.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
620
+ "model.language_model.model.layers.8.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
621
+ "model.language_model.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
622
+ "model.language_model.model.layers.8.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
623
+ "model.language_model.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
624
+ "model.language_model.model.layers.8.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
625
+ "model.language_model.model.layers.8.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
626
+ "model.language_model.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
627
+ "model.language_model.model.layers.8.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
628
+ "model.language_model.model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
629
+ "model.language_model.model.layers.9.input_layernorm_v.weight": "model-00001-of-00002.safetensors",
630
+ "model.language_model.model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
631
+ "model.language_model.model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
632
+ "model.language_model.model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
633
+ "model.language_model.model.layers.9.mlp_v.down_proj.weight": "model-00001-of-00002.safetensors",
634
+ "model.language_model.model.layers.9.mlp_v.gate_proj.weight": "model-00001-of-00002.safetensors",
635
+ "model.language_model.model.layers.9.mlp_v.up_proj.weight": "model-00001-of-00002.safetensors",
636
+ "model.language_model.model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
637
+ "model.language_model.model.layers.9.post_attention_layernorm_v.weight": "model-00001-of-00002.safetensors",
638
+ "model.language_model.model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
639
+ "model.language_model.model.layers.9.self_attn.k_proj_v.weight": "model-00001-of-00002.safetensors",
640
+ "model.language_model.model.layers.9.self_attn.key_layernorm.weight": "model-00001-of-00002.safetensors",
641
+ "model.language_model.model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
642
+ "model.language_model.model.layers.9.self_attn.o_proj_v.weight": "model-00001-of-00002.safetensors",
643
+ "model.language_model.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
644
+ "model.language_model.model.layers.9.self_attn.q_proj_v.weight": "model-00001-of-00002.safetensors",
645
+ "model.language_model.model.layers.9.self_attn.query_layernorm.weight": "model-00001-of-00002.safetensors",
646
+ "model.language_model.model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
647
+ "model.language_model.model.layers.9.self_attn.v_proj_v.weight": "model-00001-of-00002.safetensors",
648
+ "model.language_model.model.norm.weight": "model-00002-of-00002.safetensors",
649
+ "model.visual.merger.pooler.predictor.0.bias": "model-00002-of-00002.safetensors",
650
+ "model.visual.merger.pooler.predictor.0.weight": "model-00002-of-00002.safetensors",
651
+ "model.visual.merger.pooler.predictor.2.bias": "model-00002-of-00002.safetensors",
652
+ "model.visual.merger.pooler.predictor.2.weight": "model-00002-of-00002.safetensors",
653
+ "model.visual.merger.proj1.bias": "model-00002-of-00002.safetensors",
654
+ "model.visual.merger.proj1.weight": "model-00002-of-00002.safetensors",
655
+ "model.visual.merger.proj2.bias": "model-00002-of-00002.safetensors",
656
+ "model.visual.merger.proj2.weight": "model-00002-of-00002.safetensors",
657
+ "model.visual.vision_tower.blocks.0.attn.proj.bias": "model-00002-of-00002.safetensors",
658
+ "model.visual.vision_tower.blocks.0.attn.proj.weight": "model-00002-of-00002.safetensors",
659
+ "model.visual.vision_tower.blocks.0.attn.qkv.bias": "model-00002-of-00002.safetensors",
660
+ "model.visual.vision_tower.blocks.0.attn.qkv.weight": "model-00002-of-00002.safetensors",
661
+ "model.visual.vision_tower.blocks.0.mlp.fc1.bias": "model-00002-of-00002.safetensors",
662
+ "model.visual.vision_tower.blocks.0.mlp.fc1.weight": "model-00002-of-00002.safetensors",
663
+ "model.visual.vision_tower.blocks.0.mlp.fc2.bias": "model-00002-of-00002.safetensors",
664
+ "model.visual.vision_tower.blocks.0.mlp.fc2.weight": "model-00002-of-00002.safetensors",
665
+ "model.visual.vision_tower.blocks.0.norm1.bias": "model-00002-of-00002.safetensors",
666
+ "model.visual.vision_tower.blocks.0.norm1.weight": "model-00002-of-00002.safetensors",
667
+ "model.visual.vision_tower.blocks.0.norm2.bias": "model-00002-of-00002.safetensors",
668
+ "model.visual.vision_tower.blocks.0.norm2.weight": "model-00002-of-00002.safetensors",
669
+ "model.visual.vision_tower.blocks.1.attn.proj.bias": "model-00002-of-00002.safetensors",
670
+ "model.visual.vision_tower.blocks.1.attn.proj.weight": "model-00002-of-00002.safetensors",
671
+ "model.visual.vision_tower.blocks.1.attn.qkv.bias": "model-00002-of-00002.safetensors",
672
+ "model.visual.vision_tower.blocks.1.attn.qkv.weight": "model-00002-of-00002.safetensors",
673
+ "model.visual.vision_tower.blocks.1.mlp.fc1.bias": "model-00002-of-00002.safetensors",
674
+ "model.visual.vision_tower.blocks.1.mlp.fc1.weight": "model-00002-of-00002.safetensors",
675
+ "model.visual.vision_tower.blocks.1.mlp.fc2.bias": "model-00002-of-00002.safetensors",
676
+ "model.visual.vision_tower.blocks.1.mlp.fc2.weight": "model-00002-of-00002.safetensors",
677
+ "model.visual.vision_tower.blocks.1.norm1.bias": "model-00002-of-00002.safetensors",
678
+ "model.visual.vision_tower.blocks.1.norm1.weight": "model-00002-of-00002.safetensors",
679
+ "model.visual.vision_tower.blocks.1.norm2.bias": "model-00002-of-00002.safetensors",
680
+ "model.visual.vision_tower.blocks.1.norm2.weight": "model-00002-of-00002.safetensors",
681
+ "model.visual.vision_tower.blocks.10.attn.proj.bias": "model-00002-of-00002.safetensors",
682
+ "model.visual.vision_tower.blocks.10.attn.proj.weight": "model-00002-of-00002.safetensors",
683
+ "model.visual.vision_tower.blocks.10.attn.qkv.bias": "model-00002-of-00002.safetensors",
684
+ "model.visual.vision_tower.blocks.10.attn.qkv.weight": "model-00002-of-00002.safetensors",
685
+ "model.visual.vision_tower.blocks.10.mlp.fc1.bias": "model-00002-of-00002.safetensors",
686
+ "model.visual.vision_tower.blocks.10.mlp.fc1.weight": "model-00002-of-00002.safetensors",
687
+ "model.visual.vision_tower.blocks.10.mlp.fc2.bias": "model-00002-of-00002.safetensors",
688
+ "model.visual.vision_tower.blocks.10.mlp.fc2.weight": "model-00002-of-00002.safetensors",
689
+ "model.visual.vision_tower.blocks.10.norm1.bias": "model-00002-of-00002.safetensors",
690
+ "model.visual.vision_tower.blocks.10.norm1.weight": "model-00002-of-00002.safetensors",
691
+ "model.visual.vision_tower.blocks.10.norm2.bias": "model-00002-of-00002.safetensors",
692
+ "model.visual.vision_tower.blocks.10.norm2.weight": "model-00002-of-00002.safetensors",
693
+ "model.visual.vision_tower.blocks.11.attn.proj.bias": "model-00002-of-00002.safetensors",
694
+ "model.visual.vision_tower.blocks.11.attn.proj.weight": "model-00002-of-00002.safetensors",
695
+ "model.visual.vision_tower.blocks.11.attn.qkv.bias": "model-00002-of-00002.safetensors",
696
+ "model.visual.vision_tower.blocks.11.attn.qkv.weight": "model-00002-of-00002.safetensors",
697
+ "model.visual.vision_tower.blocks.11.mlp.fc1.bias": "model-00002-of-00002.safetensors",
698
+ "model.visual.vision_tower.blocks.11.mlp.fc1.weight": "model-00002-of-00002.safetensors",
699
+ "model.visual.vision_tower.blocks.11.mlp.fc2.bias": "model-00002-of-00002.safetensors",
700
+ "model.visual.vision_tower.blocks.11.mlp.fc2.weight": "model-00002-of-00002.safetensors",
701
+ "model.visual.vision_tower.blocks.11.norm1.bias": "model-00002-of-00002.safetensors",
702
+ "model.visual.vision_tower.blocks.11.norm1.weight": "model-00002-of-00002.safetensors",
703
+ "model.visual.vision_tower.blocks.11.norm2.bias": "model-00002-of-00002.safetensors",
704
+ "model.visual.vision_tower.blocks.11.norm2.weight": "model-00002-of-00002.safetensors",
705
+ "model.visual.vision_tower.blocks.12.attn.proj.bias": "model-00002-of-00002.safetensors",
706
+ "model.visual.vision_tower.blocks.12.attn.proj.weight": "model-00002-of-00002.safetensors",
707
+ "model.visual.vision_tower.blocks.12.attn.qkv.bias": "model-00002-of-00002.safetensors",
708
+ "model.visual.vision_tower.blocks.12.attn.qkv.weight": "model-00002-of-00002.safetensors",
709
+ "model.visual.vision_tower.blocks.12.mlp.fc1.bias": "model-00002-of-00002.safetensors",
710
+ "model.visual.vision_tower.blocks.12.mlp.fc1.weight": "model-00002-of-00002.safetensors",
711
+ "model.visual.vision_tower.blocks.12.mlp.fc2.bias": "model-00002-of-00002.safetensors",
712
+ "model.visual.vision_tower.blocks.12.mlp.fc2.weight": "model-00002-of-00002.safetensors",
713
+ "model.visual.vision_tower.blocks.12.norm1.bias": "model-00002-of-00002.safetensors",
714
+ "model.visual.vision_tower.blocks.12.norm1.weight": "model-00002-of-00002.safetensors",
715
+ "model.visual.vision_tower.blocks.12.norm2.bias": "model-00002-of-00002.safetensors",
716
+ "model.visual.vision_tower.blocks.12.norm2.weight": "model-00002-of-00002.safetensors",
717
+ "model.visual.vision_tower.blocks.13.attn.proj.bias": "model-00002-of-00002.safetensors",
718
+ "model.visual.vision_tower.blocks.13.attn.proj.weight": "model-00002-of-00002.safetensors",
719
+ "model.visual.vision_tower.blocks.13.attn.qkv.bias": "model-00002-of-00002.safetensors",
720
+ "model.visual.vision_tower.blocks.13.attn.qkv.weight": "model-00002-of-00002.safetensors",
721
+ "model.visual.vision_tower.blocks.13.mlp.fc1.bias": "model-00002-of-00002.safetensors",
722
+ "model.visual.vision_tower.blocks.13.mlp.fc1.weight": "model-00002-of-00002.safetensors",
723
+ "model.visual.vision_tower.blocks.13.mlp.fc2.bias": "model-00002-of-00002.safetensors",
724
+ "model.visual.vision_tower.blocks.13.mlp.fc2.weight": "model-00002-of-00002.safetensors",
725
+ "model.visual.vision_tower.blocks.13.norm1.bias": "model-00002-of-00002.safetensors",
726
+ "model.visual.vision_tower.blocks.13.norm1.weight": "model-00002-of-00002.safetensors",
727
+ "model.visual.vision_tower.blocks.13.norm2.bias": "model-00002-of-00002.safetensors",
728
+ "model.visual.vision_tower.blocks.13.norm2.weight": "model-00002-of-00002.safetensors",
729
+ "model.visual.vision_tower.blocks.14.attn.proj.bias": "model-00002-of-00002.safetensors",
730
+ "model.visual.vision_tower.blocks.14.attn.proj.weight": "model-00002-of-00002.safetensors",
731
+ "model.visual.vision_tower.blocks.14.attn.qkv.bias": "model-00002-of-00002.safetensors",
732
+ "model.visual.vision_tower.blocks.14.attn.qkv.weight": "model-00002-of-00002.safetensors",
733
+ "model.visual.vision_tower.blocks.14.mlp.fc1.bias": "model-00002-of-00002.safetensors",
734
+ "model.visual.vision_tower.blocks.14.mlp.fc1.weight": "model-00002-of-00002.safetensors",
735
+ "model.visual.vision_tower.blocks.14.mlp.fc2.bias": "model-00002-of-00002.safetensors",
736
+ "model.visual.vision_tower.blocks.14.mlp.fc2.weight": "model-00002-of-00002.safetensors",
737
+ "model.visual.vision_tower.blocks.14.norm1.bias": "model-00002-of-00002.safetensors",
738
+ "model.visual.vision_tower.blocks.14.norm1.weight": "model-00002-of-00002.safetensors",
739
+ "model.visual.vision_tower.blocks.14.norm2.bias": "model-00002-of-00002.safetensors",
740
+ "model.visual.vision_tower.blocks.14.norm2.weight": "model-00002-of-00002.safetensors",
741
+ "model.visual.vision_tower.blocks.15.attn.proj.bias": "model-00002-of-00002.safetensors",
742
+ "model.visual.vision_tower.blocks.15.attn.proj.weight": "model-00002-of-00002.safetensors",
743
+ "model.visual.vision_tower.blocks.15.attn.qkv.bias": "model-00002-of-00002.safetensors",
744
+ "model.visual.vision_tower.blocks.15.attn.qkv.weight": "model-00002-of-00002.safetensors",
745
+ "model.visual.vision_tower.blocks.15.mlp.fc1.bias": "model-00002-of-00002.safetensors",
746
+ "model.visual.vision_tower.blocks.15.mlp.fc1.weight": "model-00002-of-00002.safetensors",
747
+ "model.visual.vision_tower.blocks.15.mlp.fc2.bias": "model-00002-of-00002.safetensors",
748
+ "model.visual.vision_tower.blocks.15.mlp.fc2.weight": "model-00002-of-00002.safetensors",
749
+ "model.visual.vision_tower.blocks.15.norm1.bias": "model-00002-of-00002.safetensors",
750
+ "model.visual.vision_tower.blocks.15.norm1.weight": "model-00002-of-00002.safetensors",
751
+ "model.visual.vision_tower.blocks.15.norm2.bias": "model-00002-of-00002.safetensors",
752
+ "model.visual.vision_tower.blocks.15.norm2.weight": "model-00002-of-00002.safetensors",
753
+ "model.visual.vision_tower.blocks.16.attn.proj.bias": "model-00002-of-00002.safetensors",
754
+ "model.visual.vision_tower.blocks.16.attn.proj.weight": "model-00002-of-00002.safetensors",
755
+ "model.visual.vision_tower.blocks.16.attn.qkv.bias": "model-00002-of-00002.safetensors",
756
+ "model.visual.vision_tower.blocks.16.attn.qkv.weight": "model-00002-of-00002.safetensors",
757
+ "model.visual.vision_tower.blocks.16.mlp.fc1.bias": "model-00002-of-00002.safetensors",
758
+ "model.visual.vision_tower.blocks.16.mlp.fc1.weight": "model-00002-of-00002.safetensors",
759
+ "model.visual.vision_tower.blocks.16.mlp.fc2.bias": "model-00002-of-00002.safetensors",
760
+ "model.visual.vision_tower.blocks.16.mlp.fc2.weight": "model-00002-of-00002.safetensors",
761
+ "model.visual.vision_tower.blocks.16.norm1.bias": "model-00002-of-00002.safetensors",
762
+ "model.visual.vision_tower.blocks.16.norm1.weight": "model-00002-of-00002.safetensors",
763
+ "model.visual.vision_tower.blocks.16.norm2.bias": "model-00002-of-00002.safetensors",
764
+ "model.visual.vision_tower.blocks.16.norm2.weight": "model-00002-of-00002.safetensors",
765
+ "model.visual.vision_tower.blocks.17.attn.proj.bias": "model-00002-of-00002.safetensors",
766
+ "model.visual.vision_tower.blocks.17.attn.proj.weight": "model-00002-of-00002.safetensors",
767
+ "model.visual.vision_tower.blocks.17.attn.qkv.bias": "model-00002-of-00002.safetensors",
768
+ "model.visual.vision_tower.blocks.17.attn.qkv.weight": "model-00002-of-00002.safetensors",
769
+ "model.visual.vision_tower.blocks.17.mlp.fc1.bias": "model-00002-of-00002.safetensors",
770
+ "model.visual.vision_tower.blocks.17.mlp.fc1.weight": "model-00002-of-00002.safetensors",
771
+ "model.visual.vision_tower.blocks.17.mlp.fc2.bias": "model-00002-of-00002.safetensors",
772
+ "model.visual.vision_tower.blocks.17.mlp.fc2.weight": "model-00002-of-00002.safetensors",
773
+ "model.visual.vision_tower.blocks.17.norm1.bias": "model-00002-of-00002.safetensors",
774
+ "model.visual.vision_tower.blocks.17.norm1.weight": "model-00002-of-00002.safetensors",
775
+ "model.visual.vision_tower.blocks.17.norm2.bias": "model-00002-of-00002.safetensors",
776
+ "model.visual.vision_tower.blocks.17.norm2.weight": "model-00002-of-00002.safetensors",
777
+ "model.visual.vision_tower.blocks.18.attn.proj.bias": "model-00002-of-00002.safetensors",
778
+ "model.visual.vision_tower.blocks.18.attn.proj.weight": "model-00002-of-00002.safetensors",
779
+ "model.visual.vision_tower.blocks.18.attn.qkv.bias": "model-00002-of-00002.safetensors",
780
+ "model.visual.vision_tower.blocks.18.attn.qkv.weight": "model-00002-of-00002.safetensors",
781
+ "model.visual.vision_tower.blocks.18.mlp.fc1.bias": "model-00002-of-00002.safetensors",
782
+ "model.visual.vision_tower.blocks.18.mlp.fc1.weight": "model-00002-of-00002.safetensors",
783
+ "model.visual.vision_tower.blocks.18.mlp.fc2.bias": "model-00002-of-00002.safetensors",
784
+ "model.visual.vision_tower.blocks.18.mlp.fc2.weight": "model-00002-of-00002.safetensors",
785
+ "model.visual.vision_tower.blocks.18.norm1.bias": "model-00002-of-00002.safetensors",
786
+ "model.visual.vision_tower.blocks.18.norm1.weight": "model-00002-of-00002.safetensors",
787
+ "model.visual.vision_tower.blocks.18.norm2.bias": "model-00002-of-00002.safetensors",
788
+ "model.visual.vision_tower.blocks.18.norm2.weight": "model-00002-of-00002.safetensors",
789
+ "model.visual.vision_tower.blocks.19.attn.proj.bias": "model-00002-of-00002.safetensors",
790
+ "model.visual.vision_tower.blocks.19.attn.proj.weight": "model-00002-of-00002.safetensors",
791
+ "model.visual.vision_tower.blocks.19.attn.qkv.bias": "model-00002-of-00002.safetensors",
792
+ "model.visual.vision_tower.blocks.19.attn.qkv.weight": "model-00002-of-00002.safetensors",
793
+ "model.visual.vision_tower.blocks.19.mlp.fc1.bias": "model-00002-of-00002.safetensors",
794
+ "model.visual.vision_tower.blocks.19.mlp.fc1.weight": "model-00002-of-00002.safetensors",
795
+ "model.visual.vision_tower.blocks.19.mlp.fc2.bias": "model-00002-of-00002.safetensors",
796
+ "model.visual.vision_tower.blocks.19.mlp.fc2.weight": "model-00002-of-00002.safetensors",
797
+ "model.visual.vision_tower.blocks.19.norm1.bias": "model-00002-of-00002.safetensors",
798
+ "model.visual.vision_tower.blocks.19.norm1.weight": "model-00002-of-00002.safetensors",
799
+ "model.visual.vision_tower.blocks.19.norm2.bias": "model-00002-of-00002.safetensors",
800
+ "model.visual.vision_tower.blocks.19.norm2.weight": "model-00002-of-00002.safetensors",
801
+ "model.visual.vision_tower.blocks.2.attn.proj.bias": "model-00002-of-00002.safetensors",
802
+ "model.visual.vision_tower.blocks.2.attn.proj.weight": "model-00002-of-00002.safetensors",
803
+ "model.visual.vision_tower.blocks.2.attn.qkv.bias": "model-00002-of-00002.safetensors",
804
+ "model.visual.vision_tower.blocks.2.attn.qkv.weight": "model-00002-of-00002.safetensors",
805
+ "model.visual.vision_tower.blocks.2.mlp.fc1.bias": "model-00002-of-00002.safetensors",
806
+ "model.visual.vision_tower.blocks.2.mlp.fc1.weight": "model-00002-of-00002.safetensors",
807
+ "model.visual.vision_tower.blocks.2.mlp.fc2.bias": "model-00002-of-00002.safetensors",
808
+ "model.visual.vision_tower.blocks.2.mlp.fc2.weight": "model-00002-of-00002.safetensors",
809
+ "model.visual.vision_tower.blocks.2.norm1.bias": "model-00002-of-00002.safetensors",
810
+ "model.visual.vision_tower.blocks.2.norm1.weight": "model-00002-of-00002.safetensors",
811
+ "model.visual.vision_tower.blocks.2.norm2.bias": "model-00002-of-00002.safetensors",
812
+ "model.visual.vision_tower.blocks.2.norm2.weight": "model-00002-of-00002.safetensors",
813
+ "model.visual.vision_tower.blocks.20.attn.proj.bias": "model-00002-of-00002.safetensors",
814
+ "model.visual.vision_tower.blocks.20.attn.proj.weight": "model-00002-of-00002.safetensors",
815
+ "model.visual.vision_tower.blocks.20.attn.qkv.bias": "model-00002-of-00002.safetensors",
816
+ "model.visual.vision_tower.blocks.20.attn.qkv.weight": "model-00002-of-00002.safetensors",
817
+ "model.visual.vision_tower.blocks.20.mlp.fc1.bias": "model-00002-of-00002.safetensors",
818
+ "model.visual.vision_tower.blocks.20.mlp.fc1.weight": "model-00002-of-00002.safetensors",
819
+ "model.visual.vision_tower.blocks.20.mlp.fc2.bias": "model-00002-of-00002.safetensors",
820
+ "model.visual.vision_tower.blocks.20.mlp.fc2.weight": "model-00002-of-00002.safetensors",
821
+ "model.visual.vision_tower.blocks.20.norm1.bias": "model-00002-of-00002.safetensors",
822
+ "model.visual.vision_tower.blocks.20.norm1.weight": "model-00002-of-00002.safetensors",
823
+ "model.visual.vision_tower.blocks.20.norm2.bias": "model-00002-of-00002.safetensors",
824
+ "model.visual.vision_tower.blocks.20.norm2.weight": "model-00002-of-00002.safetensors",
825
+ "model.visual.vision_tower.blocks.21.attn.proj.bias": "model-00002-of-00002.safetensors",
826
+ "model.visual.vision_tower.blocks.21.attn.proj.weight": "model-00002-of-00002.safetensors",
827
+ "model.visual.vision_tower.blocks.21.attn.qkv.bias": "model-00002-of-00002.safetensors",
828
+ "model.visual.vision_tower.blocks.21.attn.qkv.weight": "model-00002-of-00002.safetensors",
829
+ "model.visual.vision_tower.blocks.21.mlp.fc1.bias": "model-00002-of-00002.safetensors",
830
+ "model.visual.vision_tower.blocks.21.mlp.fc1.weight": "model-00002-of-00002.safetensors",
831
+ "model.visual.vision_tower.blocks.21.mlp.fc2.bias": "model-00002-of-00002.safetensors",
832
+ "model.visual.vision_tower.blocks.21.mlp.fc2.weight": "model-00002-of-00002.safetensors",
833
+ "model.visual.vision_tower.blocks.21.norm1.bias": "model-00002-of-00002.safetensors",
834
+ "model.visual.vision_tower.blocks.21.norm1.weight": "model-00002-of-00002.safetensors",
835
+ "model.visual.vision_tower.blocks.21.norm2.bias": "model-00002-of-00002.safetensors",
836
+ "model.visual.vision_tower.blocks.21.norm2.weight": "model-00002-of-00002.safetensors",
837
+ "model.visual.vision_tower.blocks.22.attn.proj.bias": "model-00002-of-00002.safetensors",
838
+ "model.visual.vision_tower.blocks.22.attn.proj.weight": "model-00002-of-00002.safetensors",
839
+ "model.visual.vision_tower.blocks.22.attn.qkv.bias": "model-00002-of-00002.safetensors",
840
+ "model.visual.vision_tower.blocks.22.attn.qkv.weight": "model-00002-of-00002.safetensors",
841
+ "model.visual.vision_tower.blocks.22.mlp.fc1.bias": "model-00002-of-00002.safetensors",
842
+ "model.visual.vision_tower.blocks.22.mlp.fc1.weight": "model-00002-of-00002.safetensors",
843
+ "model.visual.vision_tower.blocks.22.mlp.fc2.bias": "model-00002-of-00002.safetensors",
844
+ "model.visual.vision_tower.blocks.22.mlp.fc2.weight": "model-00002-of-00002.safetensors",
845
+ "model.visual.vision_tower.blocks.22.norm1.bias": "model-00002-of-00002.safetensors",
846
+ "model.visual.vision_tower.blocks.22.norm1.weight": "model-00002-of-00002.safetensors",
847
+ "model.visual.vision_tower.blocks.22.norm2.bias": "model-00002-of-00002.safetensors",
848
+ "model.visual.vision_tower.blocks.22.norm2.weight": "model-00002-of-00002.safetensors",
849
+ "model.visual.vision_tower.blocks.23.attn.proj.bias": "model-00002-of-00002.safetensors",
850
+ "model.visual.vision_tower.blocks.23.attn.proj.weight": "model-00002-of-00002.safetensors",
851
+ "model.visual.vision_tower.blocks.23.attn.qkv.bias": "model-00002-of-00002.safetensors",
852
+ "model.visual.vision_tower.blocks.23.attn.qkv.weight": "model-00002-of-00002.safetensors",
853
+ "model.visual.vision_tower.blocks.23.mlp.fc1.bias": "model-00002-of-00002.safetensors",
854
+ "model.visual.vision_tower.blocks.23.mlp.fc1.weight": "model-00002-of-00002.safetensors",
855
+ "model.visual.vision_tower.blocks.23.mlp.fc2.bias": "model-00002-of-00002.safetensors",
856
+ "model.visual.vision_tower.blocks.23.mlp.fc2.weight": "model-00002-of-00002.safetensors",
857
+ "model.visual.vision_tower.blocks.23.norm1.bias": "model-00002-of-00002.safetensors",
858
+ "model.visual.vision_tower.blocks.23.norm1.weight": "model-00002-of-00002.safetensors",
859
+ "model.visual.vision_tower.blocks.23.norm2.bias": "model-00002-of-00002.safetensors",
860
+ "model.visual.vision_tower.blocks.23.norm2.weight": "model-00002-of-00002.safetensors",
861
+ "model.visual.vision_tower.blocks.24.attn.proj.bias": "model-00002-of-00002.safetensors",
862
+ "model.visual.vision_tower.blocks.24.attn.proj.weight": "model-00002-of-00002.safetensors",
863
+ "model.visual.vision_tower.blocks.24.attn.qkv.bias": "model-00002-of-00002.safetensors",
864
+ "model.visual.vision_tower.blocks.24.attn.qkv.weight": "model-00002-of-00002.safetensors",
865
+ "model.visual.vision_tower.blocks.24.mlp.fc1.bias": "model-00002-of-00002.safetensors",
866
+ "model.visual.vision_tower.blocks.24.mlp.fc1.weight": "model-00002-of-00002.safetensors",
867
+ "model.visual.vision_tower.blocks.24.mlp.fc2.bias": "model-00002-of-00002.safetensors",
868
+ "model.visual.vision_tower.blocks.24.mlp.fc2.weight": "model-00002-of-00002.safetensors",
869
+ "model.visual.vision_tower.blocks.24.norm1.bias": "model-00002-of-00002.safetensors",
870
+ "model.visual.vision_tower.blocks.24.norm1.weight": "model-00002-of-00002.safetensors",
871
+ "model.visual.vision_tower.blocks.24.norm2.bias": "model-00002-of-00002.safetensors",
872
+ "model.visual.vision_tower.blocks.24.norm2.weight": "model-00002-of-00002.safetensors",
873
+ "model.visual.vision_tower.blocks.25.attn.proj.bias": "model-00002-of-00002.safetensors",
874
+ "model.visual.vision_tower.blocks.25.attn.proj.weight": "model-00002-of-00002.safetensors",
875
+ "model.visual.vision_tower.blocks.25.attn.qkv.bias": "model-00002-of-00002.safetensors",
876
+ "model.visual.vision_tower.blocks.25.attn.qkv.weight": "model-00002-of-00002.safetensors",
877
+ "model.visual.vision_tower.blocks.25.mlp.fc1.bias": "model-00002-of-00002.safetensors",
878
+ "model.visual.vision_tower.blocks.25.mlp.fc1.weight": "model-00002-of-00002.safetensors",
879
+ "model.visual.vision_tower.blocks.25.mlp.fc2.bias": "model-00002-of-00002.safetensors",
880
+ "model.visual.vision_tower.blocks.25.mlp.fc2.weight": "model-00002-of-00002.safetensors",
881
+ "model.visual.vision_tower.blocks.25.norm1.bias": "model-00002-of-00002.safetensors",
882
+ "model.visual.vision_tower.blocks.25.norm1.weight": "model-00002-of-00002.safetensors",
883
+ "model.visual.vision_tower.blocks.25.norm2.bias": "model-00002-of-00002.safetensors",
884
+ "model.visual.vision_tower.blocks.25.norm2.weight": "model-00002-of-00002.safetensors",
885
+ "model.visual.vision_tower.blocks.26.attn.proj.bias": "model-00002-of-00002.safetensors",
886
+ "model.visual.vision_tower.blocks.26.attn.proj.weight": "model-00002-of-00002.safetensors",
887
+ "model.visual.vision_tower.blocks.26.attn.qkv.bias": "model-00002-of-00002.safetensors",
888
+ "model.visual.vision_tower.blocks.26.attn.qkv.weight": "model-00002-of-00002.safetensors",
889
+ "model.visual.vision_tower.blocks.26.mlp.fc1.bias": "model-00002-of-00002.safetensors",
890
+ "model.visual.vision_tower.blocks.26.mlp.fc1.weight": "model-00002-of-00002.safetensors",
891
+ "model.visual.vision_tower.blocks.26.mlp.fc2.bias": "model-00002-of-00002.safetensors",
892
+ "model.visual.vision_tower.blocks.26.mlp.fc2.weight": "model-00002-of-00002.safetensors",
893
+ "model.visual.vision_tower.blocks.26.norm1.bias": "model-00002-of-00002.safetensors",
894
+ "model.visual.vision_tower.blocks.26.norm1.weight": "model-00002-of-00002.safetensors",
895
+ "model.visual.vision_tower.blocks.26.norm2.bias": "model-00002-of-00002.safetensors",
896
+ "model.visual.vision_tower.blocks.26.norm2.weight": "model-00002-of-00002.safetensors",
897
+ "model.visual.vision_tower.blocks.3.attn.proj.bias": "model-00002-of-00002.safetensors",
898
+ "model.visual.vision_tower.blocks.3.attn.proj.weight": "model-00002-of-00002.safetensors",
899
+ "model.visual.vision_tower.blocks.3.attn.qkv.bias": "model-00002-of-00002.safetensors",
900
+ "model.visual.vision_tower.blocks.3.attn.qkv.weight": "model-00002-of-00002.safetensors",
901
+ "model.visual.vision_tower.blocks.3.mlp.fc1.bias": "model-00002-of-00002.safetensors",
902
+ "model.visual.vision_tower.blocks.3.mlp.fc1.weight": "model-00002-of-00002.safetensors",
903
+ "model.visual.vision_tower.blocks.3.mlp.fc2.bias": "model-00002-of-00002.safetensors",
904
+ "model.visual.vision_tower.blocks.3.mlp.fc2.weight": "model-00002-of-00002.safetensors",
905
+ "model.visual.vision_tower.blocks.3.norm1.bias": "model-00002-of-00002.safetensors",
906
+ "model.visual.vision_tower.blocks.3.norm1.weight": "model-00002-of-00002.safetensors",
907
+ "model.visual.vision_tower.blocks.3.norm2.bias": "model-00002-of-00002.safetensors",
908
+ "model.visual.vision_tower.blocks.3.norm2.weight": "model-00002-of-00002.safetensors",
909
+ "model.visual.vision_tower.blocks.4.attn.proj.bias": "model-00002-of-00002.safetensors",
910
+ "model.visual.vision_tower.blocks.4.attn.proj.weight": "model-00002-of-00002.safetensors",
911
+ "model.visual.vision_tower.blocks.4.attn.qkv.bias": "model-00002-of-00002.safetensors",
912
+ "model.visual.vision_tower.blocks.4.attn.qkv.weight": "model-00002-of-00002.safetensors",
913
+ "model.visual.vision_tower.blocks.4.mlp.fc1.bias": "model-00002-of-00002.safetensors",
914
+ "model.visual.vision_tower.blocks.4.mlp.fc1.weight": "model-00002-of-00002.safetensors",
915
+ "model.visual.vision_tower.blocks.4.mlp.fc2.bias": "model-00002-of-00002.safetensors",
916
+ "model.visual.vision_tower.blocks.4.mlp.fc2.weight": "model-00002-of-00002.safetensors",
917
+ "model.visual.vision_tower.blocks.4.norm1.bias": "model-00002-of-00002.safetensors",
918
+ "model.visual.vision_tower.blocks.4.norm1.weight": "model-00002-of-00002.safetensors",
919
+ "model.visual.vision_tower.blocks.4.norm2.bias": "model-00002-of-00002.safetensors",
920
+ "model.visual.vision_tower.blocks.4.norm2.weight": "model-00002-of-00002.safetensors",
921
+ "model.visual.vision_tower.blocks.5.attn.proj.bias": "model-00002-of-00002.safetensors",
922
+ "model.visual.vision_tower.blocks.5.attn.proj.weight": "model-00002-of-00002.safetensors",
923
+ "model.visual.vision_tower.blocks.5.attn.qkv.bias": "model-00002-of-00002.safetensors",
924
+ "model.visual.vision_tower.blocks.5.attn.qkv.weight": "model-00002-of-00002.safetensors",
925
+ "model.visual.vision_tower.blocks.5.mlp.fc1.bias": "model-00002-of-00002.safetensors",
926
+ "model.visual.vision_tower.blocks.5.mlp.fc1.weight": "model-00002-of-00002.safetensors",
927
+ "model.visual.vision_tower.blocks.5.mlp.fc2.bias": "model-00002-of-00002.safetensors",
928
+ "model.visual.vision_tower.blocks.5.mlp.fc2.weight": "model-00002-of-00002.safetensors",
929
+ "model.visual.vision_tower.blocks.5.norm1.bias": "model-00002-of-00002.safetensors",
930
+ "model.visual.vision_tower.blocks.5.norm1.weight": "model-00002-of-00002.safetensors",
931
+ "model.visual.vision_tower.blocks.5.norm2.bias": "model-00002-of-00002.safetensors",
932
+ "model.visual.vision_tower.blocks.5.norm2.weight": "model-00002-of-00002.safetensors",
933
+ "model.visual.vision_tower.blocks.6.attn.proj.bias": "model-00002-of-00002.safetensors",
934
+ "model.visual.vision_tower.blocks.6.attn.proj.weight": "model-00002-of-00002.safetensors",
935
+ "model.visual.vision_tower.blocks.6.attn.qkv.bias": "model-00002-of-00002.safetensors",
936
+ "model.visual.vision_tower.blocks.6.attn.qkv.weight": "model-00002-of-00002.safetensors",
937
+ "model.visual.vision_tower.blocks.6.mlp.fc1.bias": "model-00002-of-00002.safetensors",
938
+ "model.visual.vision_tower.blocks.6.mlp.fc1.weight": "model-00002-of-00002.safetensors",
939
+ "model.visual.vision_tower.blocks.6.mlp.fc2.bias": "model-00002-of-00002.safetensors",
940
+ "model.visual.vision_tower.blocks.6.mlp.fc2.weight": "model-00002-of-00002.safetensors",
941
+ "model.visual.vision_tower.blocks.6.norm1.bias": "model-00002-of-00002.safetensors",
942
+ "model.visual.vision_tower.blocks.6.norm1.weight": "model-00002-of-00002.safetensors",
943
+ "model.visual.vision_tower.blocks.6.norm2.bias": "model-00002-of-00002.safetensors",
944
+ "model.visual.vision_tower.blocks.6.norm2.weight": "model-00002-of-00002.safetensors",
945
+ "model.visual.vision_tower.blocks.7.attn.proj.bias": "model-00002-of-00002.safetensors",
946
+ "model.visual.vision_tower.blocks.7.attn.proj.weight": "model-00002-of-00002.safetensors",
947
+ "model.visual.vision_tower.blocks.7.attn.qkv.bias": "model-00002-of-00002.safetensors",
948
+ "model.visual.vision_tower.blocks.7.attn.qkv.weight": "model-00002-of-00002.safetensors",
949
+ "model.visual.vision_tower.blocks.7.mlp.fc1.bias": "model-00002-of-00002.safetensors",
950
+ "model.visual.vision_tower.blocks.7.mlp.fc1.weight": "model-00002-of-00002.safetensors",
951
+ "model.visual.vision_tower.blocks.7.mlp.fc2.bias": "model-00002-of-00002.safetensors",
952
+ "model.visual.vision_tower.blocks.7.mlp.fc2.weight": "model-00002-of-00002.safetensors",
953
+ "model.visual.vision_tower.blocks.7.norm1.bias": "model-00002-of-00002.safetensors",
954
+ "model.visual.vision_tower.blocks.7.norm1.weight": "model-00002-of-00002.safetensors",
955
+ "model.visual.vision_tower.blocks.7.norm2.bias": "model-00002-of-00002.safetensors",
956
+ "model.visual.vision_tower.blocks.7.norm2.weight": "model-00002-of-00002.safetensors",
957
+ "model.visual.vision_tower.blocks.8.attn.proj.bias": "model-00002-of-00002.safetensors",
958
+ "model.visual.vision_tower.blocks.8.attn.proj.weight": "model-00002-of-00002.safetensors",
959
+ "model.visual.vision_tower.blocks.8.attn.qkv.bias": "model-00002-of-00002.safetensors",
960
+ "model.visual.vision_tower.blocks.8.attn.qkv.weight": "model-00002-of-00002.safetensors",
961
+ "model.visual.vision_tower.blocks.8.mlp.fc1.bias": "model-00002-of-00002.safetensors",
962
+ "model.visual.vision_tower.blocks.8.mlp.fc1.weight": "model-00002-of-00002.safetensors",
963
+ "model.visual.vision_tower.blocks.8.mlp.fc2.bias": "model-00002-of-00002.safetensors",
964
+ "model.visual.vision_tower.blocks.8.mlp.fc2.weight": "model-00002-of-00002.safetensors",
965
+ "model.visual.vision_tower.blocks.8.norm1.bias": "model-00002-of-00002.safetensors",
966
+ "model.visual.vision_tower.blocks.8.norm1.weight": "model-00002-of-00002.safetensors",
967
+ "model.visual.vision_tower.blocks.8.norm2.bias": "model-00002-of-00002.safetensors",
968
+ "model.visual.vision_tower.blocks.8.norm2.weight": "model-00002-of-00002.safetensors",
969
+ "model.visual.vision_tower.blocks.9.attn.proj.bias": "model-00002-of-00002.safetensors",
970
+ "model.visual.vision_tower.blocks.9.attn.proj.weight": "model-00002-of-00002.safetensors",
971
+ "model.visual.vision_tower.blocks.9.attn.qkv.bias": "model-00002-of-00002.safetensors",
972
+ "model.visual.vision_tower.blocks.9.attn.qkv.weight": "model-00002-of-00002.safetensors",
973
+ "model.visual.vision_tower.blocks.9.mlp.fc1.bias": "model-00002-of-00002.safetensors",
974
+ "model.visual.vision_tower.blocks.9.mlp.fc1.weight": "model-00002-of-00002.safetensors",
975
+ "model.visual.vision_tower.blocks.9.mlp.fc2.bias": "model-00002-of-00002.safetensors",
976
+ "model.visual.vision_tower.blocks.9.mlp.fc2.weight": "model-00002-of-00002.safetensors",
977
+ "model.visual.vision_tower.blocks.9.norm1.bias": "model-00002-of-00002.safetensors",
978
+ "model.visual.vision_tower.blocks.9.norm1.weight": "model-00002-of-00002.safetensors",
979
+ "model.visual.vision_tower.blocks.9.norm2.bias": "model-00002-of-00002.safetensors",
980
+ "model.visual.vision_tower.blocks.9.norm2.weight": "model-00002-of-00002.safetensors",
981
+ "model.visual.vision_tower.patch_embed.proj.bias": "model-00002-of-00002.safetensors",
982
+ "model.visual.vision_tower.patch_embed.proj.weight": "model-00002-of-00002.safetensors",
983
+ "model.visual.vision_tower.pos_embed": "model-00002-of-00002.safetensors"
984
+ }
985
+ }
blobs/48d0950630d6c8df3a96de2e444abbaddd74ea2d ADDED
@@ -0,0 +1,317 @@
1
+ ---
2
+ license: other
3
+ language:
4
+ - multilingual
5
+ pipeline_tag: image-text-to-text
6
+ library_name: transformers
7
+ base_model:
8
+ - tencent/Hunyuan-Embodied-0.5
9
+ tags:
10
+ - hunyuan
11
+ - vision-language
12
+ - Embodied
13
+ - image-to-text
14
+ - 2B
15
+ - end-to-end
16
+ - MoT
17
+ ---
18
+ <div align="center">
19
+ <h1>HY-Embodied</h1>
20
+ <p><b>A Family of Embodied Foundation Models for Real-World Agents</b></p>
21
+ <p><i>Tencent Robotics X × HY Vision Team</i></p>
22
+
23
+ <a href="https://github.com/Tencent-Hunyuan/HY-Embodied/blob/master/hy_embodied_tech_report.pdf"><img src="https://img.shields.io/badge/Paper-Report-red?logo=report" alt="Tech Report"></a>
24
+ <a href="https://arxiv.org/abs/2604.07430"><img src="https://img.shields.io/badge/Paper-Arxiv-red?logo=arxiv" alt="Paper"></a>
25
+ <a href="https://huggingface.co/tencent/HY-Embodied-0.5/tree/main"><img src="https://img.shields.io/badge/Models-HuggingFace-yellow?logo=huggingface" alt="Models"></a>
26
+ <a href="https://github.com/Tencent-Hunyuan/HY-Embodied"><img src="https://img.shields.io/badge/GitHub-Repo-181717?logo=github&logoColor=white" alt="GitHub"></a>
27
+
28
+ </div>
29
+
30
+ <div align="center">
31
+ <video src="https://github.com/user-attachments/assets/a5c6b872-2cb0-4f52-8321-894fee7da27e" controls autoplay muted loop width="85%"></video>
32
+ </div>
33
+
34
+ ## 🔥 Updates
35
+
36
+ * **`[2026-04-09]`** 🚀 We have released **HY-Embodied-0.5**, featuring open-source `HY-Embodied-0.5 MoT-2B` weights on [Hugging Face](https://huggingface.co/tencent/HY-Embodied-0.5/tree/main), along with the official inference code!
37
+
38
+ ## 📖 Abstract
39
+
40
+ We introduce **HY-Embodied-0.5**, a suite of foundation models tailored specifically for real-world embodied intelligence. To bridge the gap between general Vision-Language Models (VLMs) and the strict demands of physical agents, our models are engineered to excel in spatial-temporal visual perception and complex embodied reasoning (prediction, interaction, and planning).
41
+
42
+ The suite features an innovative **Mixture-of-Transformers (MoT)** architecture utilizing latent tokens for modality-specific computing, significantly enhancing fine-grained perception. It includes two primary variants: a highly efficient **2B model** for edge deployment and a powerful **32B model** for complex reasoning. Through a self-evolving post-training paradigm and large-to-small on-policy distillation, our compact MoT-2B outperforms state-of-the-art models of similar size across 16 benchmarks, while the 32B variant achieves frontier-level performance comparable to Gemini 3.0 Pro. Ultimately, HY-Embodied serves as a robust "brain" for Vision-Language-Action (VLA) pipelines, delivering compelling results in real-world physical robot control.
43
+
44
+ <div align="center">
45
+ <img src="https://github.com/Tencent-Hunyuan/HY-Embodied/blob/master/figures/teaser.png?raw=true" alt="HY-Embodied Teaser" width="85%">
46
+ </div>
47
+
48
+
49
+ ## ⭐️ Key Features
50
+
51
+ * 🧠 **Evolved MoT Architecture:** Designed for maximum efficiency without sacrificing visual acuity. The MoT-2B variant contains 4B total parameters but requires **only 2.2B activated parameters** during inference. By emphasizing modality-specific computing in the vision pathway, it achieves the high inference speed of a dense 2B model while delivering superior, fine-grained perceptual representations (an illustrative sketch of this routing idea follows this list).
52
+ * 🔗 **High-Quality Mixed Chain Reasoning:** We introduce an advanced iterative, self-evolving post-training pipeline. By employing on-policy distillation, we successfully transfer the sophisticated step-by-step reasoning, planning, and high-quality "thinking" capabilities from our powerful 32B model directly to the compact 2B variant.
53
+ * 🌍 **Large-Scale Embodied Pre-training:** Grounded in a massive, specially curated dataset comprising **>100 million** embodied and spatial-specific data points. Trained on a corpus exceeding **200 billion tokens**, the model develops a deep, native understanding of 3D spaces, physical object interactions, and agent dynamics.
54
+ * 🦾 **Stronger VLA Application:** Beyond standard academic benchmarks, HY-Embodied is engineered to be the core cognitive engine for physical robots. It seamlessly integrates into Vision-Language-Action (VLA) frameworks, acting as a highly robust and capable brain to drive high success rates in complex, real-world robotic control tasks.
55
+
56
+ <div align="center">
57
+ <img src="https://github.com/Tencent-Hunyuan/HY-Embodied/blob/master/figures/arch.png?raw=true" alt="HY-Embodied Architecture" width="85%">
58
+ </div>
59
+
60
+ ## 📅 Roadmap
61
+
62
+ - [x] Transformers Inference
63
+ - [ ] vLLM Inference
64
+ - [ ] Online Gradio Demo
65
+
66
+ ## 🛠️ Dependencies and Installation
67
+
68
+ ### Prerequisites
69
+
70
+ - 🖥️ **Operating System**: Linux (recommended)
71
+ - 🐍 **Python**: 3.12+ (recommended and tested)
72
+ - ⚡ **CUDA**: 12.6
73
+ - 🔥 **PyTorch**: 2.8.0
74
+ - 🎮 **GPU**: NVIDIA GPU with CUDA support
75
+
76
+ ### Installation
77
+
78
+ 1. **Install the specific Transformers version required for this model:**
79
+ ```bash
80
+ pip install git+https://github.com/huggingface/transformers@9293856c419762ebf98fbe2bd9440f9ce7069f1a
81
+ ```
82
+
83
+ > **Note**: We plan to upstream these changes to the Transformers main branch.
84
+
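+ To double-check that the pinned build is the one Python actually imports, you can print the installed version. This is a quick sanity check we suggest rather than an official step; the exact dev-version string depends on the commit:
+
+ ```python
+ # Sanity check (our suggestion): confirm which transformers build is imported.
+ import transformers
+ print(transformers.__version__)
+ ```
+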
85
+ 2. **Install the remaining dependencies (from the repository cloned in Quick Start below):**
86
+ ```bash
87
+ pip install -r requirements.txt
88
+ ```
89
+
90
+ ### Quick Start
91
+
92
+ 1. **Clone the repository:**
93
+ ```bash
94
+ git clone https://github.com/Tencent-Hunyuan/HY-Embodied
95
+ cd HY-Embodied/
96
+ ```
97
+
98
+ 2. **Install dependencies:**
99
+ ```bash
100
+ pip install -r requirements.txt
101
+ ```
102
+
103
+ 3. **Run inference:**
104
+ ```bash
105
+ python inference.py
106
+ ```
107
+
108
+ The example script demonstrates both single-prompt and batch generation.
109
+
110
+ ### Model Download
111
+
112
+ The code automatically downloads the model `tencent/HY-Embodied-0.5` from the Hugging Face Hub. Ensure you have at least 8 GB of free disk space for the model weights.
113
+
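+ If you prefer to fetch the weights ahead of time (for example, on a machine with better bandwidth), a minimal pre-download sketch with the `huggingface_hub` client looks like this; the repo id comes from this card, and the rest is our suggestion rather than an official step:
+
+ ```python
+ # Pre-download sketch (our suggestion, not part of the official scripts).
+ from huggingface_hub import snapshot_download
+
+ # Fetches roughly 8 GB of weights into the local Hugging Face cache,
+ # so the inference scripts can start without downloading.
+ snapshot_download(repo_id="tencent/HY-Embodied-0.5")
+ ```
+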
114
+ ### Hardware Requirements
115
+
116
+ - **GPU**: Recommended for optimal performance (NVIDIA GPU with at least 16GB VRAM)
117
+ - **CPU**: Supported but slower (see the fallback sketch after this list)
118
+ - **Memory**: At least 16GB RAM recommended
119
+ - **Storage**: 20GB+ free space for model and dependencies
120
+
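+ If you want the examples below to run unchanged on a machine without a GPU, one small tweak is to make the `DEVICE` constant conditional; this is our suggestion, as the official examples hard-code `"cuda"`:
+
+ ```python
+ import torch
+
+ # Fall back to CPU when no CUDA device is available (supported, just slower).
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+ ```
+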
121
+ ## 🚀 Quick Start with Transformers
122
+
123
+ ### Basic Inference Example
124
+
125
+ ```python
126
+ import os
127
+ import torch
128
+ from transformers import AutoModelForImageTextToText, AutoProcessor
129
+
130
+ # Load model & processor
131
+ MODEL_PATH = "tencent/HY-Embodied-0.5"
132
+ DEVICE = "cuda"
133
+ THINKING_MODE = False
134
+ TEMPERATURE = 0.8
135
+
136
+ processor = AutoProcessor.from_pretrained(MODEL_PATH)
137
+
138
+ # Load a chat template override if present (only applies when MODEL_PATH is a local directory)
139
+ chat_template_path = os.path.join(MODEL_PATH, "chat_template.jinja")
140
+ if os.path.exists(chat_template_path):
141
+     with open(chat_template_path) as f:
+         processor.chat_template = f.read()
142
+
143
+ model = AutoModelForImageTextToText.from_pretrained(MODEL_PATH, torch_dtype=torch.bfloat16)
144
+ model.to(DEVICE).eval()
145
+
146
+ # Prepare input messages
147
+ messages = [
148
+ {
149
+ "role": "user",
150
+ "content": [
151
+ {"type": "image", "image": "./figures/example.jpg"},
152
+ {"type": "text", "text": "Describe the image in detail."},
153
+ ],
154
+ }
155
+ ]
156
+
157
+ # Process and generate
158
+ inputs = processor.apply_chat_template(
159
+ messages,
160
+ tokenize=True,
161
+ add_generation_prompt=True,
162
+ return_dict=True,
163
+ return_tensors="pt",
164
+ enable_thinking=THINKING_MODE,
165
+ ).to(model.device)
166
+
167
+ with torch.no_grad():
168
+ generated_ids = model.generate(
169
+ **inputs,
170
+ max_new_tokens=32768,
171
+ use_cache=True,
172
+ temperature=TEMPERATURE,
173
+ do_sample=TEMPERATURE > 0,
174
+ )
175
+
176
+ output_ids = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
177
+ print(processor.batch_decode(output_ids, skip_special_tokens=True)[0])
178
+ ```
179
+
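+ The evaluation results below are reported with thinking mode enabled. To try it with the example above, flip the flag that the script forwards to `apply_chat_template`; the kwarg name comes from the example itself, and this card does not specify the exact format of the emitted reasoning trace:
+
+ ```python
+ # Enable step-by-step "thinking"; the value is forwarded as
+ # enable_thinking=THINKING_MODE when building the inputs above.
+ THINKING_MODE = True
+ ```
+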
180
+ ### Batch Inference
181
+
182
+ ```python
183
+ import os
184
+ import torch
185
+ from transformers import AutoModelForImageTextToText, AutoProcessor
186
+
187
+ # Load model & processor
188
+ MODEL_PATH = "tencent/HY-Embodied-0.5"
189
+ DEVICE = "cuda"
190
+ THINKING_MODE = False
191
+ TEMPERATURE = 0.8
192
+
193
+ processor = AutoProcessor.from_pretrained(MODEL_PATH)
194
+
195
+ # Load a chat template override if present (only applies when MODEL_PATH is a local directory)
196
+ chat_template_path = os.path.join(MODEL_PATH, "chat_template.jinja")
197
+ if os.path.exists(chat_template_path):
198
+     with open(chat_template_path) as f:
+         processor.chat_template = f.read()
199
+
200
+ model = AutoModelForImageTextToText.from_pretrained(MODEL_PATH, torch_dtype=torch.bfloat16)
201
+ model.to(DEVICE).eval()
202
+
203
+ # Batch Inference (multiple prompts at once)
204
+ messages_batch = [
205
+ # Sample A: image + text
206
+ [
207
+ {
208
+ "role": "user",
209
+ "content": [
210
+ {"type": "image", "image": "./figures/example.jpg"},
211
+ {"type": "text", "text": "Describe the image in detail."},
212
+ ],
213
+ }
214
+ ],
215
+ # Sample B: text only
216
+ [
217
+ {
218
+ "role": "user",
219
+ "content": [
220
+ {"type": "text", "text": "How to open a fridge?"},
221
+ ],
222
+ }
223
+ ],
224
+ ]
225
+
226
+ # Process each message independently
227
+ all_inputs = []
228
+ for msgs in messages_batch:
229
+ inp = processor.apply_chat_template(
230
+ msgs,
231
+ tokenize=True,
232
+ add_generation_prompt=True,
233
+ return_dict=True,
234
+ return_tensors="pt",
235
+ enable_thinking=THINKING_MODE,
236
+ )
237
+ all_inputs.append(inp)
238
+
239
+ # Left-pad and batch
240
+ batch = processor.pad(all_inputs, padding=True, padding_side="left").to(model.device)
241
+
242
+ with torch.no_grad():
243
+ batch_generated_ids = model.generate(
244
+ **batch,
245
+ max_new_tokens=32768,
246
+ use_cache=True,
247
+ temperature=TEMPERATURE,
248
+ do_sample=TEMPERATURE > 0,
249
+ )
250
+
251
+ # Decode: strip the padded input portion
252
+ padded_input_len = batch["input_ids"].shape[1]
253
+ for i, msgs in enumerate(messages_batch):
254
+ out_ids = batch_generated_ids[i][padded_input_len:]
255
+ print(f"\n--- Sample {i} ---")
256
+ print(processor.decode(out_ids, skip_special_tokens=True))
257
+ ```
258
+
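+ Left padding is what makes the uniform slice above safe: with a decoder-only model, padding on the left keeps every prompt flush against its generated tokens, so a single `padded_input_len` offset cleanly separates input from output for every sample in the batch.
+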
259
+ ## 📊 Evaluation
260
+
261
+ ### Visual Perception
262
+
263
+ > **Note**: We evaluated HY-Embodied-0.5 MoT-2B across 22 embodied-relevant benchmarks against models of similar size. For detailed performance metrics and methodology, please refer to our technical report.
264
+
265
+ > **Note**: We observed that small models from the Qwen3.5 series fall into repetitive thinking patterns on some benchmarks, which lowers their overall results; we therefore compare against Qwen3-VL models in our evaluations.
266
+
267
+ | Benchmark | HY-Embodied 0.5 MoT-2B | Qwen3-VL 2B | Qwen3-VL 4B | RoboBrain 2.5 4B | MiMo-Embodied 7B |
268
+ |-----------|------------------------|-------------|-------------|------------------|------------------|
269
+ | CV-Bench | **89.2** | 80.0 | 85.7 | 86.9 | 88.8 |
270
+ | DA-2K | **92.3** | 69.5 | 76.5 | 79.4 | 72.2 |
271
+
272
+ ### Embodied Understanding
273
+
274
+ | Benchmark | HY-Embodied 0.5 MoT-2B | Qwen3-VL 2B | Qwen3-VL 4B | RoboBrain 2.5 4B | MiMo-Embodied 7B |
275
+ |-----------|------------------------|-------------|-------------|------------------|------------------|
276
+ | ERQA | **54.5** | 41.8 | 47.3 | 43.3 | 46.8 |
277
+ | EmbSpatial-Bench | **82.8** | 75.9 | 80.7 | 73.8 | 76.2 |
278
+ | RoboBench-MCQ | **49.2** | 36.9 | 45.8 | 44.4 | 43.6 |
279
+ | RoboBench-Planning | 54.2 | 36.2 | 36.4 | 39.2 | **58.7** |
280
+ | RoboSpatial-Home | 55.7 | 45.3 | **63.2** | 62.3 | 61.8 |
281
+ | ShareRobot-Aff. | **26.8** | 19.8 | 25.5 | 25.5 | 9.0 |
282
+ | ShareRobot-Traj. | 73.3 | 41.6 | 62.2 | **81.4** | 50.6 |
283
+ | Ego-Plan2 | 45.5 | 35.5 | 38.8 | **52.6** | 39.9 |
284
+
285
+ ### Spatial Understanding
286
+
287
+ | Benchmark | HY-Embodied 0.5 MoT-2B | Qwen3-VL 2B | Qwen3-VL 4B | RoboBrain 2.5 4B | MiMo-Embodied 7B |
288
+ |-----------|------------------------|-------------|-------------|------------------|------------------|
289
+ | 3DSRBench | **57.0** | 39.9 | 43.9 | 44.8 | 42.0 |
290
+ | All-Angles Bench | **55.1** | 42.3 | 46.7 | 43.8 | 49.0 |
291
+ | MindCube | **66.3** | 28.4 | 31.0 | 26.9 | 36.2 |
292
+ | MMSI-Bench | **33.2** | 23.6 | 25.1 | 20.5 | 31.9 |
293
+ | RefSpatial-Bench | 45.8 | 28.9 | 45.3 | **56.0** | 48.0 |
294
+ | SAT | 76.7 | 45.3 | 56.7 | 51.3 | **78.7** |
295
+ | SIBench-mini | **58.2** | 42.0 | 50.9 | 47.3 | 53.1 |
296
+ | SITE-Bench-Image | **62.7** | 52.3 | 61.0 | 57.9 | 49.9 |
297
+ | SITE-Bench-Video | **63.5** | 52.2 | 58.0 | 54.8 | 58.9 |
298
+ | ViewSpatial | **53.1** | 37.2 | 41.6 | 36.6 | 36.1 |
299
+ | VSIBench | **60.5** | 48.0 | 55.2 | 41.7 | 48.5 |
300
+ | Where2Place | **68.0** | 45.0 | 59.0 | 65.0 | 63.6 |
301
+
302
+ *Note: Results for HY-Embodied-0.5 MoT-2B are reported in thinking mode, while for all other models, we report the better performance between non-thinking and thinking modes.*
303
+
304
+ ## 📚 Citation
305
+ If you find HY-Embodied useful for your research or applications, please cite our paper using this BibTeX:
306
+ ```bibtex
307
+ @article{tencent2026hyembodied05,
308
+ title={HY-Embodied-0.5: Embodied Foundation Models for Real-World Agents},
309
+ author={Tencent Robotics X and HY Vision Team},
310
+ journal={arXiv preprint arXiv:2604.07430},
311
+ year={2026}
312
+ }
313
+ ```
314
+
315
+ ## 🙏 Acknowledgements
316
+
317
+ We thank the Hugging Face community for their support and the open-source contributions that made this implementation possible.
blobs/4a5388286b92df802c1599a1a2cfcd4fa44fafb6 ADDED
The diff for this file is too large to render.
 
blobs/53302b5a885b0702390a64f6590481a1d9ae023b ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "crop_size": null,
3
+ "data_format": "channels_first",
4
+ "default_to_square": true,
5
+ "device": null,
6
+ "disable_grouping": null,
7
+ "do_center_crop": null,
8
+ "do_convert_rgb": true,
9
+ "do_normalize": true,
10
+ "do_pad": null,
11
+ "do_rescale": true,
12
+ "do_resize": true,
13
+ "image_mean": [
14
+ 0.5,
15
+ 0.5,
16
+ 0.5
17
+ ],
18
+ "image_processor_type": "Qwen2VLImageProcessorFast",
19
+ "image_std": [
20
+ 0.5,
21
+ 0.5,
22
+ 0.5
23
+ ],
24
+ "input_data_format": null,
25
+ "max_pixels": 4194304,
26
+ "merge_size": 2,
27
+ "min_pixels": 25088,
28
+ "pad_size": null,
29
+ "patch_size": 16,
30
+ "processor_class": "HunYuanVLMoTProcessor",
31
+ "resample": 3,
32
+ "rescale_factor": 0.00392156862745098,
33
+ "return_tensors": null,
34
+ "size": {
35
+ "longest_edge": 4194304,
36
+ "shortest_edge": 25088
37
+ },
38
+ "temporal_patch_size": 1
39
+ }
blobs/580d310c2d211aca166288a207e972ced9bb0100 ADDED
@@ -0,0 +1 @@
 
 
1
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
blobs/6c83b16a6412e25a7bae459daf7e93466c885093 ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ TENCENT HY COMMUNITY LICENSE AGREEMENT
2
+ Tencent HY Embodied Release Date: April 3, 2026
3
+ THIS LICENSE AGREEMENT DOES NOT APPLY IN THE EUROPEAN UNION, UNITED KINGDOM AND SOUTH KOREA AND IS EXPRESSLY LIMITED TO THE TERRITORY, AS DEFINED BELOW.
4
+ By clicking to agree or by using, reproducing, modifying, distributing, performing or displaying any portion or element of the Tencent HY Works, including via any Hosted Service, You will be deemed to have recognized and accepted the content of this Agreement, which is effective immediately.
5
+ 1. DEFINITIONS.
6
+ a. “Acceptable Use Policy” shall mean the policy made available by Tencent as set forth in the Exhibit A.
7
+ b. “Agreement” shall mean the terms and conditions for use, reproduction, distribution, modification, performance and displaying of Tencent HY Works or any portion or element thereof set forth herein.
8
+ c. “Documentation” shall mean the specifications, manuals and documentation for Tencent HY made publicly available by Tencent.
9
+ d. “Hosted Service” shall mean a hosted service offered via an application programming interface (API), web access, or any other electronic or remote means.
10
+ e. “Licensee,” “You” or “Your” shall mean a natural person or legal entity exercising the rights granted by this Agreement and/or using the Tencent HY Works for any purpose and in any field of use.
11
+ f. “Materials” shall mean, collectively, Tencent’s proprietary Tencent HY and Documentation (and any portion thereof) as made available by Tencent under this Agreement.
12
+ g. “Model Derivatives” shall mean all: (i) modifications to Tencent HY or any Model Derivative of Tencent HY; (ii) works based on Tencent HY or any Model Derivative of Tencent HY; or (iii) any other machine learning model which is created by transfer of patterns of the weights, parameters, operations, or Output of Tencent HY or any Model Derivative of Tencent HY, to that model in order to cause that model to perform similarly to Tencent HY or a Model Derivative of Tencent HY, including distillation methods, methods that use intermediate data representations, or methods based on the generation of synthetic data Outputs by Tencent HY or a Model Derivative of Tencent HY for training that model. For clarity, Outputs by themselves are not deemed Model Derivatives.
13
+ h. “Output” shall mean the information and/or content output of Tencent HY or a Model Derivative that results from operating or otherwise using Tencent HY or a Model Derivative, including via a Hosted Service.
14
+ i. “Tencent,” “We” or “Us” shall mean the applicable entity or entities in the Tencent corporate family that own(s) intellectual property or other rights embodied in or utilized by the Materials.
15
+ j. “Tencent HY” shall mean the large language models, text/image/video/audio/3D generation models, and multimodal large language models and their software and algorithms, including trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing made publicly available by Us, including, without limitation to, Tencent HY Embodied released at [https://github.com/Tencent-Hunyuan/HY-Embodied].
16
+ k. “Tencent HY Works” shall mean: (i) the Materials; (ii) Model Derivatives; and (iii) all derivative works thereof.
17
+ l. “Territory” shall mean the worldwide territory, excluding the territory of the European Union, United Kingdom and South Korea.
18
+ m. “Third Party” or “Third Parties” shall mean individuals or legal entities that are not under common control with Us or You.
19
+ n. “including” shall mean including but not limited to.
20
+ 2. GRANT OF RIGHTS.
21
+ We grant You, for the Territory only, a non-exclusive, non-transferable and royalty-free limited license under Tencent’s intellectual property or other rights owned by Us embodied in or utilized by the Materials to use, reproduce, distribute, create derivative works of (including Model Derivatives), and make modifications to the Materials, only in accordance with the terms of this Agreement and the Acceptable Use Policy, and You must not violate (or encourage or permit anyone else to violate) any term of this Agreement or the Acceptable Use Policy.
22
+ 3. DISTRIBUTION.
23
+ You may, subject to Your compliance with this Agreement, distribute or make available to Third Parties the Tencent HY Works, exclusively in the Territory, provided that You meet all of the following conditions:
24
+ a. You must provide all such Third Party recipients of the Tencent HY Works or products or services using them a copy of this Agreement;
25
+ b. You must cause any modified files to carry prominent notices stating that You changed the files;
26
+ c. You are encouraged to: (i) publish at least one technology introduction blogpost or one public statement expressing Your experience of using the Tencent HY Works; and (ii) mark the products or services developed by using the Tencent HY Works to indicate that the product/service is “Powered by Tencent HY”; and
27
+ d. All distributions to Third Parties (other than through a Hosted Service) must be accompanied by a “Notice” text file that contains the following notice: “Tencent HY is licensed under the Tencent HY Community License Agreement, Copyright © 2026 Tencent. All Rights Reserved. The trademark rights of “Tencent HY” are owned by Tencent or its affiliate.”
28
+ e. In the event that You use, integrate, implement, or otherwise deploy the Tencent HY Works, in whole or in part, to provide, enable, or support any service, product, or functionality to third parties, You shall clearly, accurately, and prominently disclose to all end users the full legal name and entity of the actual provider of such service, product, or functionality. You shall expressly and conspicuously state that Tencent is not affiliated with, associated with, sponsoring, or endorsing any such service, product, or functionality. You shall not use or display any name, logo, trademark, trade name, or other indicia of Tencent in any manner that could be construed as, or be likely to create, confusion, deception, or a false impression regarding any relationship, affiliation, sponsorship, or endorsement by Tencent.
29
+ You may add Your own copyright statement to Your modifications and, except as set forth in this Section and in Section 5, may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Model Derivatives as a whole, provided Your use, reproduction, modification, distribution, performance and display of the work otherwise complies with the terms and conditions of this Agreement (including as regards the Territory). If You receive Tencent HY Works from a Licensee as part of an integrated end user product, then this Section 3 of this Agreement will not apply to You.
30
+ 4. ADDITIONAL COMMERCIAL TERMS.
31
+ If, on the Tencent HY version release date, the monthly active users of all products or services made available by or for Licensee is greater than 100 million monthly active users in the preceding calendar month, You must request a license from Tencent, which Tencent may grant to You in its sole discretion, and You are not authorized to exercise any of the rights under this Agreement unless or until Tencent otherwise expressly grants You such rights.
32
+ 5. RULES OF USE.
33
+ a. Your use of the Tencent HY Works must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Tencent HY Works, which is hereby incorporated by reference into this Agreement. You must include the use restrictions referenced in these Sections 5(a) and 5(b) as an enforceable provision in any agreement (e.g., license agreement, terms of use, etc.) governing the use and/or distribution of Tencent HY Works and You must provide notice to subsequent users to whom You distribute that Tencent HY Works are subject to the use restrictions in these Sections 5(a) and 5(b).
34
+ b. You must not use the Tencent HY Works or any Output or results of the Tencent HY Works to improve any other AI model (other than Tencent HY or Model Derivatives thereof).
35
+ c. You must not use, reproduce, modify, distribute, or display the Tencent HY Works, Output or results of the Tencent HY Works outside the Territory. Any such use outside the Territory is unlicensed and unauthorized under this Agreement.
36
+ 6. INTELLECTUAL PROPERTY.
37
+ a. Subject to Tencent’s ownership of Tencent HY Works made by or for Tencent and intellectual property rights therein, conditioned upon Your compliance with the terms and conditions of this Agreement, as between You and Tencent, You will be the owner of any derivative works and modifications of the Materials and any Model Derivatives that are made by or for You.
38
+ b. No trademark licenses are granted under this Agreement, and in connection with the Tencent HY Works, Licensee may not use any name or mark owned by or associated with Tencent or any of its affiliates, except as required for reasonable and customary use in describing and distributing the Tencent HY Works. Tencent hereby grants You a license to use “Tencent HY” (the “Mark”) in the Territory solely as required to comply with the provisions of Section 3(c), provided that You comply with any applicable laws related to trademark protection. All goodwill arising out of Your use of the Mark will inure to the benefit of Tencent.
39
+ c. If You commence a lawsuit or other proceedings (including a cross-claim or counterclaim in a lawsuit) against Us or any person or entity alleging that the Materials or any Output, or any portion of any of the foregoing, infringe any intellectual property or other right owned or licensable by You, then all licenses granted to You under this Agreement shall terminate as of the date such lawsuit or other proceeding is filed. You will defend, indemnify and hold harmless Us from and against any claim by any Third Party arising out of or related to Your or the Third Party’s use or distribution of the Tencent HY Works.
40
+ d. Tencent claims no rights in Outputs You generate. You and Your users are solely responsible for Outputs and their subsequent uses.
41
+ 7. DISCLAIMERS OF WARRANTY AND LIMITATIONS OF LIABILITY.
42
+ a. We are not obligated to support, update, provide training for, or develop any further version of the Tencent HY Works or to grant any license thereto.
43
+ b. UNLESS AND ONLY TO THE EXTENT REQUIRED BY APPLICABLE LAW, THE TENCENT HY WORKS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED “AS IS” WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES OF ANY KIND INCLUDING ANY WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, COURSE OF DEALING, USAGE OF TRADE, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING, REPRODUCING, MODIFYING, PERFORMING, DISPLAYING OR DISTRIBUTING ANY OF THE TENCENT HY WORKS OR OUTPUTS AND ASSUME ANY AND ALL RISKS ASSOCIATED WITH YOUR OR A THIRD PARTY’S USE OR DISTRIBUTION OF ANY OF THE TENCENT HY WORKS OR OUTPUTS AND YOUR EXERCISE OF RIGHTS AND PERMISSIONS UNDER THIS AGREEMENT.
44
+ c. TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL TENCENT OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, FOR ANY DAMAGES, INCLUDING ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR LOST PROFITS OF ANY KIND ARISING FROM THIS AGREEMENT OR RELATED TO ANY OF THE TENCENT HY WORKS OR OUTPUTS, EVEN IF TENCENT OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
45
+ 8. SURVIVAL AND TERMINATION.
46
+ a. The term of this Agreement shall commence upon Your acceptance of this Agreement or access to the Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein.
47
+ b. We may terminate this Agreement if You breach any of the terms or conditions of this Agreement. Upon termination of this Agreement, You must promptly delete and cease use of the Tencent HY Works. Sections 6(a), 6(c), 7 and 9 shall survive the termination of this Agreement.
48
+ 9. GOVERNING LAW AND JURISDICTION.
49
+ a. This Agreement and any dispute arising out of or relating to it will be governed by the laws of the Hong Kong Special Administrative Region of the People’s Republic of China, without regard to conflict of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement.
50
+ b. Exclusive jurisdiction and venue for any dispute arising out of or relating to this Agreement will be a court of competent jurisdiction in the Hong Kong Special Administrative Region of the People’s Republic of China, and Tencent and Licensee consent to the exclusive jurisdiction of such court with respect to any such dispute.
51
+
52
+ EXHIBIT A
53
+ ACCEPTABLE USE POLICY
54
+
55
+ Tencent reserves the right to update this Acceptable Use Policy from time to time.
56
+ Last modified: December 30, 2025
57
+
58
+ Tencent endeavors to promote safe and fair use of its tools and features, including Tencent HY. You agree not to use Tencent HY or Model Derivatives:
59
+ 1. Outside the Territory;
60
+ 2. In any way that violates any applicable national, federal, state, local, international or any other law or regulation;
61
+ 3. To harm Yourself or others;
62
+ 4. To repurpose or distribute output from Tencent HY or any Model Derivatives to harm Yourself or others;
63
+ 5. To override or circumvent the safety guardrails and safeguards We have put in place;
64
+ 6. For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
65
+ 7. To generate or disseminate verifiably false information and/or content with the purpose of harming others or influencing elections;
66
+ 8. To generate or facilitate false online engagement, including fake reviews and other means of fake online engagement;
67
+ 9. To intentionally defame, disparage or otherwise harass others;
68
+ 10. To generate and/or disseminate malware (including ransomware) or any other content to be used for the purpose of harming electronic systems;
69
+ 11. To generate or disseminate personal identifiable information with the purpose of harming others;
70
+ 12. To generate or disseminate information (including images, code, posts, articles), and place the information in any public context (including through the use of bot-generated tweets), without expressly and conspicuously identifying that the information and/or content is machine generated;
71
+ 13. To impersonate another individual without consent, authorization, or legal right;
72
+ 14. To make high-stakes automated decisions in domains that affect an individual’s safety, rights or wellbeing (e.g., law enforcement, migration, medicine/health, management of critical infrastructure, safety components of products, essential services, credit, employment, housing, education, social scoring, or insurance);
73
+ 15. In a manner that violates or disrespects the social ethics and moral standards of other countries or regions;
74
+ 16. To perform, facilitate, threaten, incite, plan, promote or encourage violent extremism or terrorism;
75
+ 17. For any use intended to discriminate against or harm individuals or groups based on protected characteristics or categories, online or offline social behavior or known or predicted personal or personality characteristics;
76
+ 18. To intentionally exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
77
+ 19. For military purposes;
78
+ 20. To engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or other professional practices.
blobs/962c4ff7160f8765f4a0a2a1f96c465cba6d8d8e ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 120000,
4
+ "eos_token_id": [
5
+ 120020
6
+ ],
7
+ "pad_token_id": 120002,
8
+ "transformers_version": "4.57.0"
9
+ }
blobs/a5ad94eb2fc98b41bd89b8cf33f446e86c472ccd15382c0f17a53028af90258d.incomplete ADDED
Binary file (47.3 kB).
 
blobs/d74f9032fc644874e6fe9280be84d4e7b0907939 ADDED
The diff for this file is too large to render.
 
blobs/de6aa5e4ad061bbe8400d0a92bb2243a59b3dc49 ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_classification_head": false,
3
+ "architectures": [
4
+ "HunYuanVLMoTForConditionalGeneration"
5
+ ],
6
+ "auto_map": {
7
+ "AutoConfig": "configuration_hunyuan_vl_mot.HunYuanVLMoTConfig",
8
+ "AutoModelForImageTextToText": "modeling_hunyuan_vl_mot.HunYuanVLMoTForConditionalGeneration",
9
+ "AutoProcessor": "processing_hunyuan_vl_mot.HunYuanVLMoTProcessor"
10
+ },
11
+ "attention_bias": false,
12
+ "attention_dropout": 0.0,
13
+ "attention_head_dim": 128,
14
+ "bos_token_id": 120000,
15
+ "cla_share_factor": 2,
16
+ "class_num": 0,
17
+ "dense_list": [
18
+ 2048,
19
+ 0
20
+ ],
21
+ "dtype": "bfloat16",
22
+ "eos_token_id": 120020,
23
+ "head_dim": 128,
24
+ "hidden_act": "silu",
25
+ "hidden_size": 2048,
26
+ "im_end_id": 5,
27
+ "im_newline_id": 11,
28
+ "im_start_id": 4,
29
+ "initializer_range": 0.02,
30
+ "intermediate_size": 6144,
31
+ "mask_init_id": 12,
32
+ "max_position_embeddings": 262144,
33
+ "mlp_bias": false,
34
+ "model_type": "hunyuan_vl_mot",
35
+ "norm_type": "rms",
36
+ "num_attention_heads": 16,
37
+ "num_hidden_layers": 32,
38
+ "num_key_value_heads": 4,
39
+ "org_vocab_size": 120818,
40
+ "pad_id": 120002,
41
+ "pad_token_id": 120002,
42
+ "pool_type": "last",
43
+ "pretraining_tp": 1,
44
+ "rms_norm_eps": 1e-05,
45
+ "rope_scaling": {
46
+ "alpha": 1000.0,
47
+ "beta_fast": 32,
48
+ "beta_slow": 1,
49
+ "factor": 1.0,
50
+ "mscale": 1.0,
51
+ "mscale_all_dim": 1.0,
52
+ "type": "dynamic"
53
+ },
54
+ "rope_theta": 10000.0,
55
+ "sep_token_id": 120007,
56
+ "text_end_id": 7,
57
+ "text_start_id": 6,
58
+ "tie_word_embeddings": true,
59
+ "transformers_version": "4.57.0",
60
+ "use_cache": true,
61
+ "use_cla": false,
62
+ "use_qk_norm": true,
63
+ "use_rotary_pos_emb": true,
64
+ "vocab_size": 120818
65
+ }
blobs/ede21e29cb7086f12a84eca01cda0a448e368772 ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|hy_begin▁of▁sentence|>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|hy_place▁holder▁no▁2|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<|hy_▁pad▁|>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ }
23
+ }
blobs/f00e0ca743748c8a900d937f9b7a2f043a39ae13 ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "crop_size": null,
3
+ "data_format": "channels_first",
4
+ "default_to_square": true,
5
+ "device": null,
6
+ "do_center_crop": null,
7
+ "do_convert_rgb": true,
8
+ "do_normalize": true,
9
+ "do_rescale": true,
10
+ "do_resize": true,
11
+ "do_sample_frames": true,
12
+ "fps": 2.0,
13
+ "image_mean": [
14
+ 0.5,
15
+ 0.5,
16
+ 0.5
17
+ ],
18
+ "image_std": [
19
+ 0.5,
20
+ 0.5,
21
+ 0.5
22
+ ],
23
+ "input_data_format": null,
24
+ "max_frames": 16,
25
+ "merge_size": 2,
26
+ "min_frames": 8,
27
+ "num_frames": null,
28
+ "pad_size": null,
29
+ "patch_size": 16,
30
+ "processor_class": "HunYuanVLMoTProcessor",
31
+ "resample": 3,
32
+ "rescale_factor": 0.00392156862745098,
33
+ "return_metadata": false,
34
+ "size": {
35
+ "longest_edge": 134217728,
36
+ "shortest_edge": 100352
37
+ },
38
+ "temporal_patch_size": 1,
39
+ "video_metadata": null,
40
+ "video_processor_type": "Qwen3VLVideoProcessor"
41
+ }
blobs/f78e77184ab4f3d53e28b85ffda8b328185cc40fcfaab59d17c3df0f2524621e.incomplete ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a62ea5b1a27a3ab66b90d09d414c4cfbb2d83cf6ed15c0add6a1d8c77b1e4f75
3
+ size 134148341