drwlf committed on
Commit cc6d93d · verified · 1 Parent(s): 5799222

Delete drwlf

drwlf/Medra4b/added_tokens.json DELETED
@@ -1,3 +0,0 @@
- {
-   "<image_soft_token>": 262144
- }
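
added_tokens.json maps token strings appended after the base vocabulary to fixed ids; the single entry here is Gemma 3's image placeholder token. A minimal sketch of how such a file is read, assuming a local copy of the deleted file (Python):

import json

# added_tokens.json pairs extra token strings with vocabulary ids;
# this mirrors the deleted file's single entry.
with open("added_tokens.json") as f:
    added = json.load(f)

# The id lines up with image_token_index in config.json below.
assert added["<image_soft_token>"] == 262144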
 
drwlf/Medra4b/chat_template.json DELETED
@@ -1,3 +0,0 @@
- {
-   "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n    {%- if messages[0]['content'] is string -%}\n        {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n    {%- else -%}\n        {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n    {%- endif -%}\n    {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n    {%- set first_user_prefix = \"\" -%}\n    {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n        {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n    {%- endif -%}\n    {%- if (message['role'] == 'assistant') -%}\n        {%- set role = \"model\" -%}\n    {%- else -%}\n        {%- set role = message['role'] -%}\n    {%- endif -%}\n    {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n    {%- if message['content'] is string -%}\n        {{ message['content'] | trim }}\n    {%- elif message['content'] is iterable -%}\n        {%- for item in message['content'] -%}\n            {%- if item['type'] == 'image' -%}\n                {{ '<start_of_image>' }}\n            {%- elif item['type'] == 'text' -%}\n                {{ item['text'] | trim }}\n            {%- endif -%}\n        {%- endfor -%}\n    {%- else -%}\n        {{ raise_exception(\"Invalid content type\") }}\n    {%- endif -%}\n    {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n    {{'<start_of_turn>model\n'}}\n{%- endif -%}\n"
- }
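
The deleted template encodes Gemma's turn protocol: a leading system message is folded into the first user turn, roles must strictly alternate, "assistant" is rewritten to "model", and image content items expand to <start_of_image>. A sketch of rendering it directly with jinja2, assuming a local copy of the file (Transformers would normally apply it via tokenizer.apply_chat_template):

import json
from jinja2 import Environment

def raise_exception(msg):
    # The template calls this helper when roles do not alternate.
    raise ValueError(msg)

with open("chat_template.json") as f:
    source = json.load(f)["chat_template"]

env = Environment()
env.globals["raise_exception"] = raise_exception

rendered = env.from_string(source).render(
    bos_token="<bos>",
    add_generation_prompt=True,
    messages=[
        {"role": "system", "content": "You are a careful medical assistant."},
        {"role": "user", "content": "Summarise the key findings."},
    ],
)
print(rendered)
# <bos><start_of_turn>user
# You are a careful medical assistant.
#
# Summarise the key findings.<end_of_turn>
# <start_of_turn>model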
 
drwlf/Medra4b/config.json DELETED
@@ -1,60 +0,0 @@
- {
-   "architectures": [
-     "Gemma3ForConditionalGeneration"
-   ],
-   "boi_token_index": 255999,
-   "eoi_token_index": 256000,
-   "eos_token_id": 1,
-   "image_token_index": 262144,
-   "initializer_range": 0.02,
-   "mm_tokens_per_image": 256,
-   "model_type": "gemma3",
-   "text_config": {
-     "attention_bias": false,
-     "attention_dropout": 0.0,
-     "attn_logit_softcapping": null,
-     "cache_implementation": "hybrid",
-     "final_logit_softcapping": null,
-     "head_dim": 256,
-     "hidden_activation": "gelu_pytorch_tanh",
-     "hidden_size": 2560,
-     "initializer_range": 0.02,
-     "intermediate_size": 10240,
-     "max_position_embeddings": 131072,
-     "model_type": "gemma3_text",
-     "num_attention_heads": 8,
-     "num_hidden_layers": 34,
-     "num_key_value_heads": 4,
-     "query_pre_attn_scalar": 256,
-     "rms_norm_eps": 1e-06,
-     "rope_local_base_freq": 10000.0,
-     "rope_scaling": {
-       "factor": 8.0,
-       "rope_type": "linear"
-     },
-     "rope_theta": 1000000.0,
-     "sliding_window": 1024,
-     "sliding_window_pattern": 6,
-     "torch_dtype": "bfloat16",
-     "use_cache": false,
-     "vocab_size": 262208
-   },
-   "torch_dtype": "bfloat16",
-   "transformers_version": "4.51.3",
-   "use_cache": true,
-   "vision_config": {
-     "attention_dropout": 0.0,
-     "hidden_act": "gelu_pytorch_tanh",
-     "hidden_size": 1152,
-     "image_size": 896,
-     "intermediate_size": 4304,
-     "layer_norm_eps": 1e-06,
-     "model_type": "siglip_vision_model",
-     "num_attention_heads": 16,
-     "num_channels": 3,
-     "num_hidden_layers": 27,
-     "patch_size": 14,
-     "torch_dtype": "bfloat16",
-     "vision_use_head": false
-   }
- }
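
The deleted config describes the full Gemma 3 4B multimodal stack: a 34-layer text decoder (hidden size 2560, grouped-query attention with 8 query and 4 key/value heads) paired with a 27-layer SigLIP vision tower, each image costing 256 soft tokens. A back-of-envelope parameter count from these dimensions, as a sketch (layer norms and the multimodal projector are ignored):

# Rough parameter count from the deleted config's dimensions (a sketch:
# norms and the projector are ignored; vision uses standard ViT shapes).
hidden, inter, layers, vocab = 2560, 10240, 34, 262208
q_heads, kv_heads, head_dim = 8, 4, 256

attn = 2 * hidden * q_heads * head_dim        # q_proj + o_proj
attn += 2 * hidden * kv_heads * head_dim      # k_proj + v_proj
mlp = 3 * hidden * inter                      # gate_proj, up_proj, down_proj
text = layers * (attn + mlp) + vocab * hidden # embeddings (lm_head is tied)

v_hidden, v_inter, v_layers = 1152, 4304, 27
vision = v_layers * (4 * v_hidden**2 + 2 * v_hidden * v_inter)

print(f"text ~{text / 1e9:.2f}B, vision ~{vision / 1e9:.2f}B")
# text ~3.88B, vision ~0.41B -- consistent with the "4b" in Medra4b

Note that num_attention_heads * head_dim (8 * 256 = 2048) deliberately differs from hidden_size (2560): Gemma 3 projects attention into its own width, which is why q_proj and o_proj above are hidden * q_heads * head_dim rather than hidden squared.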
 
drwlf/Medra4b/generation_config.json DELETED
@@ -1,13 +0,0 @@
- {
-   "bos_token_id": 2,
-   "cache_implementation": "hybrid",
-   "do_sample": true,
-   "eos_token_id": [
-     1,
-     106
-   ],
-   "pad_token_id": 0,
-   "top_k": 64,
-   "top_p": 0.95,
-   "transformers_version": "4.51.3"
- }
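
generation_config.json carried the sampling defaults the model shipped with. A sketch of reconstructing it in code, assuming transformers is installed (the model and inputs referenced at the end are hypothetical placeholders):

from transformers import GenerationConfig

# Mirror of the deleted file: sampling enabled with Gemma's usual
# top-k/top-p, and two stop ids -- 1 (<eos>) and 106 (<end_of_turn>).
gen_cfg = GenerationConfig(
    bos_token_id=2,
    eos_token_id=[1, 106],
    pad_token_id=0,
    do_sample=True,
    top_k=64,
    top_p=0.95,
    cache_implementation="hybrid",
)

# Hypothetical usage once a model and tokenized inputs exist:
# output_ids = model.generate(**inputs, generation_config=gen_cfg)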
 
drwlf/Medra4b/model-00001-of-00002.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8151787edd24c435c57679993f89463a4146266eeda9a33a1e91edf68b6dbf10
- size 4961251752
 
drwlf/Medra4b/model-00002-of-00002.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e7a5fc41bdf0300e22face10f03663b2812194c1e2f3b925e2debaf21425149a
- size 3639026128
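
Both weight shards are Git LFS pointer files: the repository versions only the three-line stanza above (spec version, sha256 oid, byte size) while the blob itself lives in LFS storage, so this commit drops about 8.6 GB of weights across the two shards. A sketch of parsing a pointer and checking a downloaded blob against it (paths are hypothetical):

import hashlib

# Parse a git-lfs pointer file (the "version / oid / size" lines above)
# and verify a locally downloaded blob against it.
def parse_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def verify(blob_path: str, pointer: dict) -> bool:
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == pointer["oid"]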
 
drwlf/Medra4b/model.safetensors.index.json DELETED
@@ -1,890 +0,0 @@
- {
-   "metadata": {
-     "total_size": 8600158944
-   },
-   "weight_map": {
-     "language_model.model.embed_tokens.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.14.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.14.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.14.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.14.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.15.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.15.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.15.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.15.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.2.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.2.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.2.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.2.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.20.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.20.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.20.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.20.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.20.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.20.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.20.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.20.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.20.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.20.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.3.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.3.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.3.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.3.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.30.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.30.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.30.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.30.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.post_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.pre_feedforward_layernorm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
-     "language_model.model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.4.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.4.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.4.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.4.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.post_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.pre_feedforward_layernorm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "language_model.model.norm.weight": "model-00002-of-00002.safetensors",
-     "multi_modal_projector.mm_input_projection_weight": "model-00001-of-00002.safetensors",
-     "multi_modal_projector.mm_soft_emb_norm.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.embeddings.patch_embedding.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.embeddings.patch_embedding.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.embeddings.position_embedding.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "model-00001-of-00002.safetensors",
-     "vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "model-00001-of-00002.safetensors",
555
- "vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00002.safetensors",
556
- "vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00002.safetensors",
557
- "vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00002.safetensors",
558
- "vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00002.safetensors",
559
- "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
560
- "vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
561
- "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
562
- "vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
563
- "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
564
- "vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
565
- "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
566
- "vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
567
- "vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "model-00001-of-00002.safetensors",
568
- "vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "model-00001-of-00002.safetensors",
569
- "vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "model-00001-of-00002.safetensors",
570
- "vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "model-00001-of-00002.safetensors",
571
- "vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00002.safetensors",
572
- "vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00002.safetensors",
573
- "vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00002.safetensors",
574
- "vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00002.safetensors",
575
- "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
576
- "vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
577
- "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
578
- "vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
579
- "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
580
- "vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
581
- "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
582
- "vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
583
- "vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "model-00001-of-00002.safetensors",
584
- "vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "model-00001-of-00002.safetensors",
585
- "vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "model-00001-of-00002.safetensors",
586
- "vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "model-00001-of-00002.safetensors",
587
- "vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00002.safetensors",
588
- "vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00002.safetensors",
589
- "vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00002.safetensors",
590
- "vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00002.safetensors",
591
- "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
592
- "vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
593
- "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
594
- "vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
595
- "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
596
- "vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
597
- "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
598
- "vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
599
- "vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "model-00001-of-00002.safetensors",
600
- "vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "model-00001-of-00002.safetensors",
601
- "vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "model-00001-of-00002.safetensors",
602
- "vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "model-00001-of-00002.safetensors",
603
- "vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00002.safetensors",
604
- "vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00002.safetensors",
605
- "vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00002.safetensors",
606
- "vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00002.safetensors",
607
- "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
608
- "vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
609
- "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
610
- "vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
611
- "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
612
- "vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
613
- "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
614
- "vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
615
- "vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "model-00001-of-00002.safetensors",
616
- "vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "model-00001-of-00002.safetensors",
617
- "vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "model-00001-of-00002.safetensors",
618
- "vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "model-00001-of-00002.safetensors",
619
- "vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00002.safetensors",
620
- "vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00002.safetensors",
621
- "vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00002.safetensors",
622
- "vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00002.safetensors",
623
- "vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
624
- "vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
625
- "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
626
- "vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
627
- "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
628
- "vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
629
- "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
630
- "vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
631
- "vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "model-00001-of-00002.safetensors",
632
- "vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "model-00001-of-00002.safetensors",
633
- "vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "model-00001-of-00002.safetensors",
634
- "vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "model-00001-of-00002.safetensors",
635
- "vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00002.safetensors",
636
- "vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00002.safetensors",
637
- "vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00002.safetensors",
638
- "vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00002.safetensors",
639
- "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
640
- "vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
641
- "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
642
- "vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
643
- "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
644
- "vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
645
- "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
646
- "vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
647
- "vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "model-00001-of-00002.safetensors",
648
- "vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "model-00001-of-00002.safetensors",
649
- "vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "model-00001-of-00002.safetensors",
650
- "vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "model-00001-of-00002.safetensors",
651
- "vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00002.safetensors",
652
- "vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00002.safetensors",
653
- "vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00002.safetensors",
654
- "vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00002.safetensors",
655
- "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
656
- "vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
657
- "vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
658
- "vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
659
- "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
660
- "vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
661
- "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
662
- "vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
663
- "vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "model-00001-of-00002.safetensors",
664
- "vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "model-00001-of-00002.safetensors",
665
- "vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "model-00001-of-00002.safetensors",
666
- "vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "model-00001-of-00002.safetensors",
667
- "vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00002.safetensors",
668
- "vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00002.safetensors",
669
- "vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00002.safetensors",
670
- "vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00002.safetensors",
671
- "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
672
- "vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
673
- "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
674
- "vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
675
- "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
676
- "vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
677
- "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
678
- "vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
679
- "vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "model-00001-of-00002.safetensors",
680
- "vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "model-00001-of-00002.safetensors",
681
- "vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "model-00001-of-00002.safetensors",
682
- "vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "model-00001-of-00002.safetensors",
683
- "vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00002.safetensors",
684
- "vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00002.safetensors",
685
- "vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00002.safetensors",
686
- "vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00002.safetensors",
687
- "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
688
- "vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
689
- "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
690
- "vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
691
- "vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
692
- "vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
693
- "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
694
- "vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
695
- "vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "model-00001-of-00002.safetensors",
696
- "vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "model-00001-of-00002.safetensors",
697
- "vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "model-00001-of-00002.safetensors",
698
- "vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "model-00001-of-00002.safetensors",
699
- "vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00002.safetensors",
700
- "vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00002.safetensors",
701
- "vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00002.safetensors",
702
- "vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00002.safetensors",
703
- "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
704
- "vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
705
- "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
706
- "vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
707
- "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
708
- "vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
709
- "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
710
- "vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
711
- "vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "model-00001-of-00002.safetensors",
712
- "vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "model-00001-of-00002.safetensors",
713
- "vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "model-00001-of-00002.safetensors",
714
- "vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "model-00001-of-00002.safetensors",
715
- "vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00002.safetensors",
716
- "vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00002.safetensors",
717
- "vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00002.safetensors",
718
- "vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00002.safetensors",
719
- "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
720
- "vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
721
- "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
722
- "vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
723
- "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
724
- "vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
725
- "vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
726
- "vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
727
- "vision_tower.vision_model.encoder.layers.24.layer_norm1.bias": "model-00001-of-00002.safetensors",
728
- "vision_tower.vision_model.encoder.layers.24.layer_norm1.weight": "model-00001-of-00002.safetensors",
729
- "vision_tower.vision_model.encoder.layers.24.layer_norm2.bias": "model-00001-of-00002.safetensors",
730
- "vision_tower.vision_model.encoder.layers.24.layer_norm2.weight": "model-00001-of-00002.safetensors",
731
- "vision_tower.vision_model.encoder.layers.24.mlp.fc1.bias": "model-00001-of-00002.safetensors",
732
- "vision_tower.vision_model.encoder.layers.24.mlp.fc1.weight": "model-00001-of-00002.safetensors",
733
- "vision_tower.vision_model.encoder.layers.24.mlp.fc2.bias": "model-00001-of-00002.safetensors",
734
- "vision_tower.vision_model.encoder.layers.24.mlp.fc2.weight": "model-00001-of-00002.safetensors",
735
- "vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
736
- "vision_tower.vision_model.encoder.layers.24.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
737
- "vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
738
- "vision_tower.vision_model.encoder.layers.24.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
739
- "vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
740
- "vision_tower.vision_model.encoder.layers.24.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
741
- "vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
742
- "vision_tower.vision_model.encoder.layers.24.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
743
- "vision_tower.vision_model.encoder.layers.25.layer_norm1.bias": "model-00001-of-00002.safetensors",
744
- "vision_tower.vision_model.encoder.layers.25.layer_norm1.weight": "model-00001-of-00002.safetensors",
745
- "vision_tower.vision_model.encoder.layers.25.layer_norm2.bias": "model-00001-of-00002.safetensors",
746
- "vision_tower.vision_model.encoder.layers.25.layer_norm2.weight": "model-00001-of-00002.safetensors",
747
- "vision_tower.vision_model.encoder.layers.25.mlp.fc1.bias": "model-00001-of-00002.safetensors",
748
- "vision_tower.vision_model.encoder.layers.25.mlp.fc1.weight": "model-00001-of-00002.safetensors",
749
- "vision_tower.vision_model.encoder.layers.25.mlp.fc2.bias": "model-00001-of-00002.safetensors",
750
- "vision_tower.vision_model.encoder.layers.25.mlp.fc2.weight": "model-00001-of-00002.safetensors",
751
- "vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
752
- "vision_tower.vision_model.encoder.layers.25.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
753
- "vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
754
- "vision_tower.vision_model.encoder.layers.25.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
755
- "vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
756
- "vision_tower.vision_model.encoder.layers.25.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
757
- "vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
758
- "vision_tower.vision_model.encoder.layers.25.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
759
- "vision_tower.vision_model.encoder.layers.26.layer_norm1.bias": "model-00001-of-00002.safetensors",
760
- "vision_tower.vision_model.encoder.layers.26.layer_norm1.weight": "model-00001-of-00002.safetensors",
761
- "vision_tower.vision_model.encoder.layers.26.layer_norm2.bias": "model-00001-of-00002.safetensors",
762
- "vision_tower.vision_model.encoder.layers.26.layer_norm2.weight": "model-00001-of-00002.safetensors",
763
- "vision_tower.vision_model.encoder.layers.26.mlp.fc1.bias": "model-00001-of-00002.safetensors",
764
- "vision_tower.vision_model.encoder.layers.26.mlp.fc1.weight": "model-00001-of-00002.safetensors",
765
- "vision_tower.vision_model.encoder.layers.26.mlp.fc2.bias": "model-00001-of-00002.safetensors",
766
- "vision_tower.vision_model.encoder.layers.26.mlp.fc2.weight": "model-00001-of-00002.safetensors",
767
- "vision_tower.vision_model.encoder.layers.26.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
768
- "vision_tower.vision_model.encoder.layers.26.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
769
- "vision_tower.vision_model.encoder.layers.26.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
770
- "vision_tower.vision_model.encoder.layers.26.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
771
- "vision_tower.vision_model.encoder.layers.26.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
772
- "vision_tower.vision_model.encoder.layers.26.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
773
- "vision_tower.vision_model.encoder.layers.26.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
774
- "vision_tower.vision_model.encoder.layers.26.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
775
- "vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "model-00001-of-00002.safetensors",
776
- "vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "model-00001-of-00002.safetensors",
777
- "vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "model-00001-of-00002.safetensors",
778
- "vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "model-00001-of-00002.safetensors",
779
- "vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00002.safetensors",
780
- "vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00002.safetensors",
781
- "vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00002.safetensors",
782
- "vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00002.safetensors",
783
- "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
784
- "vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
785
- "vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
786
- "vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
787
- "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
788
- "vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
789
- "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
790
- "vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
791
- "vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "model-00001-of-00002.safetensors",
792
- "vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "model-00001-of-00002.safetensors",
793
- "vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "model-00001-of-00002.safetensors",
794
- "vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "model-00001-of-00002.safetensors",
795
- "vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00002.safetensors",
796
- "vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00002.safetensors",
797
- "vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00002.safetensors",
798
- "vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00002.safetensors",
799
- "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
800
- "vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
801
- "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
802
- "vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
803
- "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
804
- "vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
805
- "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
806
- "vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
807
- "vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "model-00001-of-00002.safetensors",
808
- "vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "model-00001-of-00002.safetensors",
809
- "vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "model-00001-of-00002.safetensors",
810
- "vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "model-00001-of-00002.safetensors",
811
- "vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00002.safetensors",
812
- "vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00002.safetensors",
813
- "vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00002.safetensors",
814
- "vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00002.safetensors",
815
- "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
816
- "vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
817
- "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
818
- "vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
819
- "vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
820
- "vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
821
- "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
822
- "vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
823
- "vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "model-00001-of-00002.safetensors",
824
- "vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "model-00001-of-00002.safetensors",
825
- "vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "model-00001-of-00002.safetensors",
826
- "vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "model-00001-of-00002.safetensors",
827
- "vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00002.safetensors",
828
- "vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00002.safetensors",
829
- "vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00002.safetensors",
830
- "vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00002.safetensors",
831
- "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
832
- "vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
833
- "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
834
- "vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
835
- "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
836
- "vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
837
- "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
838
- "vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
839
- "vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "model-00001-of-00002.safetensors",
840
- "vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "model-00001-of-00002.safetensors",
841
- "vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "model-00001-of-00002.safetensors",
842
- "vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "model-00001-of-00002.safetensors",
843
- "vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00002.safetensors",
844
- "vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00002.safetensors",
845
- "vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00002.safetensors",
846
- "vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00002.safetensors",
847
- "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
848
- "vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
849
- "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
850
- "vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
851
- "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
852
- "vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
853
- "vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
854
- "vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
855
- "vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "model-00001-of-00002.safetensors",
856
- "vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "model-00001-of-00002.safetensors",
857
- "vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "model-00001-of-00002.safetensors",
858
- "vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "model-00001-of-00002.safetensors",
859
- "vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00002.safetensors",
860
- "vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00002.safetensors",
861
- "vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00002.safetensors",
862
- "vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00002.safetensors",
863
- "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
864
- "vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
865
- "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
866
- "vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
867
- "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
868
- "vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
869
- "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
870
- "vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
871
- "vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "model-00001-of-00002.safetensors",
872
- "vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "model-00001-of-00002.safetensors",
873
- "vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "model-00001-of-00002.safetensors",
874
- "vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "model-00001-of-00002.safetensors",
875
- "vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00002.safetensors",
876
- "vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00002.safetensors",
877
- "vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00002.safetensors",
878
- "vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00002.safetensors",
879
- "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
880
- "vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
881
- "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "model-00001-of-00002.safetensors",
882
- "vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "model-00001-of-00002.safetensors",
883
- "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
884
- "vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
885
- "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
886
- "vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
887
- "vision_tower.vision_model.post_layernorm.bias": "model-00001-of-00002.safetensors",
888
- "vision_tower.vision_model.post_layernorm.weight": "model-00001-of-00002.safetensors"
889
- }
890
- }
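The entries above close out the deleted `model.safetensors.index.json`: every remaining vision-tower tensor was stored in the first of the checkpoint's two shards. As a minimal sketch of how such an index is consumed (illustrative paths and helper name; only the standard `safetensors` API is assumed), loaders group tensor names by shard so each file is read once:

```python
import json
from collections import defaultdict

from safetensors.torch import load_file

def load_sharded_state_dict(index_path: str, shard_dir: str) -> dict:
    """Assemble a state dict from a sharded safetensors checkpoint (sketch)."""
    with open(index_path) as f:
        index = json.load(f)
    # "weight_map" maps each tensor name to the shard file that stores it.
    by_shard = defaultdict(list)
    for name, shard in index["weight_map"].items():
        by_shard[shard].append(name)
    state_dict = {}
    for shard, names in by_shard.items():
        tensors = load_file(f"{shard_dir}/{shard}")  # one read per shard file
        state_dict.update({n: tensors[n] for n in names})
    return state_dict
```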
drwlf/Medra4b/preprocessor_config.json DELETED
@@ -1,29 +0,0 @@
- {
- "do_convert_rgb": null,
- "do_normalize": true,
- "do_pan_and_scan": null,
- "do_rescale": true,
- "do_resize": true,
- "image_mean": [
- 0.5,
- 0.5,
- 0.5
- ],
- "image_processor_type": "Gemma3ImageProcessor",
- "image_seq_length": 256,
- "image_std": [
- 0.5,
- 0.5,
- 0.5
- ],
- "pan_and_scan_max_num_crops": null,
- "pan_and_scan_min_crop_size": null,
- "pan_and_scan_min_ratio_to_activate": null,
- "processor_class": "Gemma3Processor",
- "resample": 2,
- "rescale_factor": 0.00392156862745098,
- "size": {
- "height": 896,
- "width": 896
- }
- }
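The deleted preprocessor config pins the pixel pipeline: resize to 896×896 with bilinear resampling (`resample: 2`), rescale by 1/255 (`0.00392156862745098`), then normalize with mean = std = 0.5, which maps inputs into [-1, 1]. A minimal NumPy sketch of that math (not the actual `Gemma3ImageProcessor` implementation):

```python
import numpy as np
from PIL import Image

def preprocess(image: Image.Image) -> np.ndarray:
    # resample=2 in the config corresponds to PIL's bilinear filter.
    image = image.convert("RGB").resize((896, 896), Image.BILINEAR)
    x = np.asarray(image).astype(np.float32) * (1.0 / 255.0)  # rescale_factor
    x = (x - 0.5) / 0.5                                       # image_mean / image_std -> [-1, 1]
    return x.transpose(2, 0, 1)                               # HWC -> CHW
```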
drwlf/Medra4b/processor_config.json DELETED
@@ -1,4 +0,0 @@
- {
- "image_seq_length": 256,
- "processor_class": "Gemma3Processor"
- }
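`image_seq_length: 256` matches `mm_tokens_per_image` in the deleted model config. The arithmetic behind it, derived here from the vision config's 896-pixel input and 14-pixel patches rather than stated in this file:

```python
patches = (896 // 14) ** 2                 # 64 x 64 = 4096 patch embeddings
tokens_per_image = 256                     # image_seq_length / mm_tokens_per_image
pool_factor = patches // tokens_per_image  # 16, i.e. 4x4 spatial pooling (inferred)
```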
drwlf/Medra4b/special_tokens_map.json DELETED
@@ -1,33 +0,0 @@
- {
- "boi_token": "<start_of_image>",
- "bos_token": {
- "content": "<bos>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "eoi_token": "<end_of_image>",
- "eos_token": {
- "content": "<eos>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "image_token": "<image_soft_token>",
- "pad_token": {
- "content": "<pad>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "unk_token": {
- "content": "<unk>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- }
- }
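These entries tell the tokenizer which strings act as control tokens (`<bos>`, `<eos>`, `<pad>`, `<unk>`) and which delimit image content. A quick sketch of how they surface once a tokenizer is loaded (the base-model ID is used purely for illustration, since this repo is deleted):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google/gemma-3-4b-it")  # illustrative ID
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
ids = tok("hello", add_special_tokens=True).input_ids  # the BOS id is prepended
```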
drwlf/Medra4b/tokenizer.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
- size 33384568
drwlf/Medra4b/tokenizer.model DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
- size 4689074
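Both tokenizer files above are Git LFS pointers rather than the payloads themselves: three `key value` lines giving the spec version, the SHA-256 of the real blob, and its byte size (about 33 MB for `tokenizer.json`, about 4.7 MB for `tokenizer.model`), which LFS swaps in at checkout. A minimal parser sketch (the helper name is illustrative):

```python
def parse_lfs_pointer(text: str) -> dict:
    """Split each 'key value' line of a Git LFS pointer file into a dict."""
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

ptr = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795\n"
    "size 33384568\n"
)
print(ptr["oid"], int(ptr["size"]))  # the hash and size of the real blob
```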
drwlf/Medra4b/tokenizer_config.json DELETED
The diff for this file is too large to render. See raw diff